Diffstat (limited to '3rdparty/asmjit/src')
-rw-r--r--  3rdparty/asmjit/src/asmjit.natvis | 83
-rw-r--r--  3rdparty/asmjit/src/asmjit/asmjit.h | 58
-rw-r--r--  3rdparty/asmjit/src/asmjit/core.h | 102
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/api-build_p.h | 77
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/api-config.h | 533
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/arch.cpp | 176
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/arch.h | 204
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/assembler.cpp | 514
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/assembler.h | 176
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/builder.cpp | 1004
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/builder.h | 1305
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/callconv.cpp | 59
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/callconv.h | 411
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/codebufferwriter_p.h | 188
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/codeholder.cpp | 1125
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/codeholder.h | 930
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/compiler.cpp | 669
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/compiler.h | 674
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/constpool.cpp | 375
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/constpool.h | 257
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp | 97
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/cpuinfo.h | 152
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/datatypes.h | 1073
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/emitter.cpp | 272
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/emitter.h | 554
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/features.h | 162
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/func.cpp | 144
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/func.h | 966
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/globals.cpp | 131
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/globals.h | 425
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/inst.cpp | 139
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/inst.h | 469
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/jitallocator.cpp | 1152
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/jitallocator.h | 278
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/jitruntime.cpp | 156
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/jitruntime.h | 126
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/logging.cpp | 535
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/logging.h | 355
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/misc_p.h | 49
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/operand.cpp | 136
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/operand.h | 1337
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/osutils.cpp | 106
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/osutils.h | 139
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/raassignment_p.h | 399
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/rabuilders_p.h | 632
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/radefs_p.h | 1094
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/ralocal.cpp | 1041
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/ralocal_p.h | 281
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/rapass.cpp | 1994
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/rapass_p.h | 1189
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/rastack.cpp | 207
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/rastack_p.h | 183
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/string.cpp | 545
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/string.h | 352
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/support.cpp | 499
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/support.h | 1411
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/target.cpp | 38
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/target.h | 210
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/type.cpp | 44
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/type.h | 398
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/virtmem.cpp | 589
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/virtmem.h | 145
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zone.cpp | 382
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zone.h | 642
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonehash.cpp | 331
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonehash.h | 217
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonelist.cpp | 182
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonelist.h | 203
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonestack.cpp | 197
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonestack.h | 234
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonestring.h | 125
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonetree.cpp | 118
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonetree.h | 385
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonevector.cpp | 375
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/zonevector.h | 699
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86.h | 42
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp | 4747
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86assembler.h | 102
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86builder.cpp | 69
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86builder.h | 79
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86callconv.cpp | 163
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86callconv_p.h | 50
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86compiler.cpp | 76
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86compiler.h | 288
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86emitter.h | 5566
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86features.cpp | 393
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86features.h | 286
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86globals.h | 2039
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp | 1543
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h | 59
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp | 3983
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86instdb.h | 471
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h | 318
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86internal.cpp | 1633
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86internal_p.h | 87
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86logging.cpp | 781
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86logging_p.h | 72
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h | 452
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86operand.cpp | 271
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86operand.h | 1060
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp | 1172
-rw-r--r--  3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h | 118
102 files changed, 59134 insertions, 0 deletions
diff --git a/3rdparty/asmjit/src/asmjit.natvis b/3rdparty/asmjit/src/asmjit.natvis
new file mode 100644
index 00000000000..18a083c6c12
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit.natvis
@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!-- asmjit visualizer for Visual Studio (natvis) -->
+
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="asmjit::String">
+ <Intrinsic Name="isSmall" Expression="(_type &lt; 0x1F)"/>
+ <DisplayString Condition="isSmall()">{_small.data, s8}</DisplayString>
+ <DisplayString Condition="!isSmall()">{_large.data, s8}</DisplayString>
+ <Expand HideRawView="true">
+ <Synthetic Name="_type">
+ <DisplayString Condition="(_type &lt; 0x1F)">Small</DisplayString>
+ <DisplayString Condition="(_type == 0x1F)">Large</DisplayString>
+ <DisplayString Condition="(_type &gt; 0x1F)">External</DisplayString>
+ </Synthetic>
+ <Item Name="_size" Condition="isSmall()" ExcludeView="simple">(int)_small.type, d</Item>
+ <Item Name="_size" Condition="!isSmall()" ExcludeView="simple">_large.size, d</Item>
+ <Item Name="_capacity" Condition="isSmall()" ExcludeView="simple">asmjit::String::kSSOCapacity, d</Item>
+ <Item Name="_capacity" Condition="!isSmall()" ExcludeView="simple">_large.capacity, d</Item>
+ <Item Name="_data" Condition="isSmall()" ExcludeView="simple">_small.data, s8</Item>
+ <Item Name="_data" Condition="!isSmall()" ExcludeView="simple">_large.data, s8</Item>
+ </Expand>
+ </Type>
+
+ <Type Name="asmjit::ZoneVector&lt;*&gt;">
+ <DisplayString>{{ [size={_size, d} capacity={_capacity, d}] }}</DisplayString>
+ <Expand>
+ <Item Name="_size" ExcludeView="simple">_size, d</Item>
+ <Item Name="_capacity" ExcludeView="simple">_capacity, d</Item>
+ <ArrayItems>
+ <Size>_size</Size>
+ <ValuePointer>(($T1*)_data)</ValuePointer>
+ </ArrayItems>
+ </Expand>
+ </Type>
+
+ <Type Name="asmjit::Operand_">
+ <Intrinsic Name="opType" Expression="(unsigned int)(_signature &amp; 0x7)"/>
+ <Intrinsic Name="opSize" Expression="(_signature &gt;&gt; 24) &amp; 0xFF"/>
+
+ <Intrinsic Name="regType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F"/>
+ <Intrinsic Name="regGroup" Expression="(_signature &gt;&gt; 8) &amp; 0xF"/>
+
+ <Intrinsic Name="memBaseType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F"/>
+ <Intrinsic Name="memIndexType" Expression="(_signature &gt;&gt; 8) &amp; 0x1F"/>
+ <Intrinsic Name="memAddrType" Expression="(_signature &gt;&gt; 13) &amp; 0x3"/>
+ <Intrinsic Name="memRegHome" Expression="(_signature &gt;&gt; 15) &amp; 0x1"/>
+
+ <Intrinsic Name="memBaseId" Expression="_baseId"/>
+ <Intrinsic Name="memIndexId" Expression="_data[0]"/>
+
+ <Intrinsic Name="memOffset32b" Expression="(__int64)int(_data[1])"/>
+ <Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_baseId &lt;&lt; 32) | ((unsigned __int64)_data[1])"/>
+ <Intrinsic Name="memOffset" Expression="memBaseType() != 0 ? memOffset32b() : memOffset64b()"/>
+
+ <Intrinsic Name="immValue" Expression="((__int64)_data[1] &lt;&lt; 32) | (__int64)_data[0]"/>
+
+ <DisplayString Condition="opType() == 0">[None]</DisplayString>
+ <DisplayString Condition="opType() == 1">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
+ <DisplayString Condition="opType() == 2">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
+ <DisplayString Condition="opType() == 3">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
+ <DisplayString Condition="opType() == 4">[Label] {{ id={_baseId} }}</DisplayString>
+ <DisplayString Condition="opType() &gt; 4">[Unknown]</DisplayString>
+ <Expand HideRawView="true">
+ <Item Name="_signature">_signature, X</Item>
+ <Item Name="_signature.any.type">(asmjit::Operand_::OpType)opType()</Item>
+ <Item Name="_signature.any.size">opSize(), d</Item>
+ <Item Name="_signature.reg.type" Condition="opType() == 1">(asmjit::BaseReg::RegType)regType()</Item>
+ <Item Name="_signature.reg.group" Condition="opType() == 1">(asmjit::BaseReg::RegGroup)regGroup()</Item>
+ <Item Name="_signature.mem.baseType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memBaseType()</Item>
+ <Item Name="_signature.mem.indexType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memIndexType()</Item>
+ <Item Name="_signature.mem.addrType" Condition="opType() == 2">(asmjit::BaseMem::AddrType)memAddrType()</Item>
+ <Item Name="_signature.mem.regHome" Condition="opType() == 2">(bool)memRegHome()</Item>
+ <Item Name="_baseId">_baseId</Item>
+ <Item Name="_data[0]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[0]</Item>
+ <Item Name="_data[1]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[1]</Item>
+ <Item Name="_data[IndexId]" Condition="opType() == 2">_data[0]</Item>
+ <Item Name="_data[OffsetLo]" Condition="opType() == 2">_data[1]</Item>
+ <Item Name="_data[ImmHi]" Condition="opType() == 3">_data[0]</Item>
+ <Item Name="_data[ImmLo]" Condition="opType() == 3">_data[1]</Item>
+ </Expand>
+ </Type>
+</AutoVisualizer>
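The Operand_ visualizer above decodes a packed 32-bit operand signature. For
reference, the same decoding expressed as C++ (a sketch; the bit positions are
taken from the natvis intrinsics above, but the helper names are hypothetical
and not part of the AsmJit API):

    static inline unsigned opType(uint32_t sig)   { return sig & 0x7u; }          // bits 0..2: None/Reg/Mem/Imm/Label
    static inline unsigned regType(uint32_t sig)  { return (sig >> 3) & 0x1Fu; }  // bits 3..7
    static inline unsigned regGroup(uint32_t sig) { return (sig >> 8) & 0xFu; }   // bits 8..11
    static inline unsigned opSize(uint32_t sig)   { return (sig >> 24) & 0xFFu; } // bits 24..31

An operand is displayed as [Reg] when opType() == 1, [Mem] when 2, [Imm] when
3, and [Label] when 4, matching the DisplayString conditions above.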
diff --git a/3rdparty/asmjit/src/asmjit/asmjit.h b/3rdparty/asmjit/src/asmjit/asmjit.h
new file mode 100644
index 00000000000..e543a633d04
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/asmjit.h
@@ -0,0 +1,58 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_ASMJIT_H_INCLUDED
+#define ASMJIT_ASMJIT_H_INCLUDED
+
+//! \mainpage API Reference
+//!
+//! AsmJit C++ API reference documentation generated by Doxygen.
+//!
+//! An introduction is provided by the project page at https://github.com/asmjit/asmjit.
+//!
+//! \section main_groups Groups
+//!
+//! The documentation is split into the following groups:
+//!
+//! $$DOCS_GROUP_OVERVIEW$$
+//!
+//! \section main_other Other Pages
+//!
+//! - <a href="annotated.html">Class List</a> - List of classes sorted alphabetically
+//! - <a href="namespaceasmjit.html">AsmJit Namespace</a> - List of symbols provided by `asmjit` namespace
+
+//! \namespace asmjit
+//!
+//! Root namespace used by AsmJit.
+
+#include "./core.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "./x86.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "./arm.h"
+#endif
+
+#endif // ASMJIT_ASMJIT_H_INCLUDED
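For orientation, a minimal end-to-end use of this umbrella header (a sketch
mirroring the upstream README example for this AsmJit version; it assumes the
X86 backend and JIT support are compiled in and the headers are on the
include path):

    #include <asmjit/asmjit.h>
    using namespace asmjit;

    typedef int (*Func)(void);

    int main() {
      JitRuntime rt;                // Runtime designed for JIT execution.
      CodeHolder code;              // Holds code and relocation information.
      code.init(rt.codeInfo());     // Match the runtime's code layout.

      x86::Assembler a(&code);      // Attach an assembler to the CodeHolder.
      a.mov(x86::eax, 1);           // mov eax, 1
      a.ret();                      // ret

      Func fn;
      if (rt.add(&fn, &code))       // Copy/relocate into executable memory.
        return 1;

      int result = fn();            // Execute the generated code.
      rt.release(fn);               // Free it when no longer needed.
      return result == 1 ? 0 : 1;
    }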
diff --git a/3rdparty/asmjit/src/asmjit/core.h b/3rdparty/asmjit/src/asmjit/core.h
new file mode 100644
index 00000000000..f9a56fc51fb
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core.h
@@ -0,0 +1,102 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_H_INCLUDED
+#define ASMJIT_CORE_H_INCLUDED
+
+//! \defgroup asmjit_core Core
+//! \brief Core API.
+//!
+//! API that provides classes and functions not specific to any architecture.
+
+//! \defgroup asmjit_builder Builder
+//! \brief Builder API.
+//!
+//! Both Builder and Compiler are emitters that emit everything to a representation
+//! that allows further processing. The code stored in such a representation is
+//! completely safe to be patched, simplified, reordered, obfuscated, removed,
+//! injected, analyzed, or processed in some other way. Each instruction, label,
+//! directive, or other building block is stored as a \ref BaseNode (or a derived
+//! class like \ref InstNode or \ref LabelNode) and contains all the information
+//! necessary to pass that node later to the Assembler.
+
+//! \defgroup asmjit_compiler Compiler
+//! \brief Compiler API.
+//!
+//! The Compiler tool is built on top of the \ref asmjit_builder API and adds
+//! register allocation and support for defining and calling functions. At the
+//! moment it's the easiest way to generate code, as most architecture- and
+//! OS-specific details are properly abstracted; however, abstraction also means
+//! that not everything is possible with the Compiler.
+
+//! \defgroup asmjit_func Function
+//! \brief Function API.
+
+//! \defgroup asmjit_jit JIT
+//! \brief JIT API and Virtual Memory Management.
+
+//! \defgroup asmjit_zone Zone
+//! \brief Zone allocator and zone allocated containers.
+
+//! \defgroup asmjit_support Support
+//! \brief Support API.
+
+//! \cond INTERNAL
+//! \defgroup asmjit_ra RA
+//! \brief Register allocator internals.
+//! \endcond
+
+#include "./core/globals.h"
+
+#include "./core/arch.h"
+#include "./core/assembler.h"
+#include "./core/builder.h"
+#include "./core/callconv.h"
+#include "./core/codeholder.h"
+#include "./core/compiler.h"
+#include "./core/constpool.h"
+#include "./core/cpuinfo.h"
+#include "./core/datatypes.h"
+#include "./core/emitter.h"
+#include "./core/features.h"
+#include "./core/func.h"
+#include "./core/inst.h"
+#include "./core/jitallocator.h"
+#include "./core/jitruntime.h"
+#include "./core/logging.h"
+#include "./core/operand.h"
+#include "./core/osutils.h"
+#include "./core/string.h"
+#include "./core/support.h"
+#include "./core/target.h"
+#include "./core/type.h"
+#include "./core/virtmem.h"
+#include "./core/zone.h"
+#include "./core/zonehash.h"
+#include "./core/zonelist.h"
+#include "./core/zonetree.h"
+#include "./core/zonestack.h"
+#include "./core/zonestring.h"
+#include "./core/zonevector.h"
+
+#endif // ASMJIT_CORE_H_INCLUDED
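To make the Builder/Compiler distinction described above concrete, a hedged
sketch of the Compiler workflow: virtual registers are requested by type, and
physical registers are assigned by the register allocator during finalize().
It assumes the X86 backend is compiled in and ASMJIT_NO_COMPILER is not
defined:

    using namespace asmjit;

    typedef int (*SumFunc)(int, int);

    Error makeSum(JitRuntime& rt, SumFunc* out) {
      CodeHolder code;
      code.init(rt.codeInfo());

      x86::Compiler cc(&code);
      cc.addFunc(FuncSignatureT<int, int, int>(CallConv::kIdHost));

      x86::Gp a = cc.newInt32("a"); // Virtual registers; physical ones are
      x86::Gp b = cc.newInt32("b"); // chosen later by the register allocator.
      cc.setArg(0, a);
      cc.setArg(1, b);

      cc.add(a, b);                 // a += b
      cc.ret(a);                    // return a
      cc.endFunc();
      cc.finalize();                // Run RA and serialize nodes to machine code.

      return rt.add(out, &code);
    }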
diff --git a/3rdparty/asmjit/src/asmjit/core/api-build_p.h b/3rdparty/asmjit/src/asmjit/core/api-build_p.h
new file mode 100644
index 00000000000..714107c204d
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/api-build_p.h
@@ -0,0 +1,77 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
+
+#define ASMJIT_EXPORTS
+
+// Only turn off these warnings when building AsmJit itself.
+#ifdef _MSC_VER
+ #ifndef _CRT_SECURE_NO_DEPRECATE
+ #define _CRT_SECURE_NO_DEPRECATE
+ #endif
+ #ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+ #endif
+#endif
+
+// Dependencies only required for asmjit build, but never exposed through public headers.
+#ifdef _WIN32
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #endif
+ #ifndef NOMINMAX
+ #define NOMINMAX
+ #endif
+ #include <windows.h>
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build-Only]
+// ============================================================================
+
+#include "./api-config.h"
+
+#if !defined(ASMJIT_BUILD_DEBUG) && ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 4, 0)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))
+ #define ASMJIT_FAVOR_SPEED __attribute__((__optimize__("O3")))
+#elif ASMJIT_CXX_HAS_ATTRIBUTE(__minsize__, 0)
+ #define ASMJIT_FAVOR_SIZE __attribute__((__minsize__))
+ #define ASMJIT_FAVOR_SPEED
+#else
+ #define ASMJIT_FAVOR_SIZE
+ #define ASMJIT_FAVOR_SPEED
+#endif
+
+// Make sure '#ifdef'ed unit tests are properly highlighted in IDE.
+#if !defined(ASMJIT_TEST) && defined(__INTELLISENSE__)
+ #define ASMJIT_TEST
+#endif
+
+// Include a unit testing package if this is an `asmjit_test_unit` build.
+#if defined(ASMJIT_TEST)
+ #include "../../../test/broken.h"
+#endif
+
+#endif // ASMJIT_CORE_API_BUILD_P_H_INCLUDED
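How the ASMJIT_FAVOR_SIZE / ASMJIT_FAVOR_SPEED attributes defined above are
meant to be applied inside the library (a sketch with a hypothetical function;
on GCC >= 4.4 release builds the first expands to a per-function -Os, on
compilers with __minsize__ to that attribute, and to nothing elsewhere):

    ASMJIT_FAVOR_SIZE void formatSlowPath() {
      // Rarely executed code where small size matters more than speed.
    }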
diff --git a/3rdparty/asmjit/src/asmjit/core/api-config.h b/3rdparty/asmjit/src/asmjit/core/api-config.h
new file mode 100644
index 00000000000..16199a9f41e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/api-config.h
@@ -0,0 +1,533 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
+#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
+
+// ============================================================================
+// [asmjit::Version]
+// ============================================================================
+
+#define ASMJIT_LIBRARY_VERSION 0x010200 /* 1.2.0 */
+
+// ============================================================================
+// [asmjit::Options]
+// ============================================================================
+
+// AsmJit Static Builds and Embedding
+// ----------------------------------
+//
+// These definitions can be used to enable a static library build. Embed is used
+// when AsmJit's source code is embedded directly in another project; it implies
+// a static build as well.
+//
+// #define ASMJIT_EMBED // Asmjit is embedded (implies ASMJIT_BUILD_STATIC).
+#define ASMJIT_STATIC // Enable static-library build.
+
+// AsmJit Build Mode
+// -----------------
+//
+// These definitions control the build mode and tracing support. The build mode
+// should be auto-detected at compile time, but it's possible to override it in
+// case the auto-detection fails.
+//
+// Tracing is a feature that is never compiled by default and it's only used to
+// debug AsmJit itself.
+//
+// #define ASMJIT_BUILD_DEBUG // Always use debug-mode (ASMJIT_ASSERT enabled).
+// #define ASMJIT_BUILD_RELEASE // Always use release-mode (ASMJIT_ASSERT disabled).
+
+// AsmJit Build Backends
+// ---------------------
+//
+// These definitions control which backends to compile. If none of these is
+// defined, AsmJit will use the host architecture by default (for JIT code generation).
+//
+// #define ASMJIT_BUILD_X86 // Enable X86 targets (X86 and X86_64).
+// #define ASMJIT_BUILD_ARM // Enable ARM targets (ARM and AArch64).
+// #define ASMJIT_BUILD_HOST // Enable targets based on target arch (default).
+
+// AsmJit Build Options
+// --------------------
+//
+// Flags can be defined to disable standard features. These are especially handy
+// when building AsmJit statically and some features are not needed or unwanted
+// (like BaseCompiler).
+//
+// AsmJit features are enabled by default.
+// #define ASMJIT_NO_BUILDER // Disable Builder (completely).
+// #define ASMJIT_NO_COMPILER // Disable Compiler (completely).
+// #define ASMJIT_NO_JIT // Disable JIT memory manager and JitRuntime.
+// #define ASMJIT_NO_LOGGING // Disable logging and formatting (completely).
+// #define ASMJIT_NO_TEXT // Disable everything that contains text
+// // representation (instructions, errors, ...).
+// #define ASMJIT_NO_VALIDATION // Disable validation API and options.
+// #define ASMJIT_NO_INTROSPECTION // Disable API related to instruction database.
+// // (validation, cpu features, rw-info, etc).
+
+// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
+#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
+ #define ASMJIT_NO_COMPILER
+#endif
+
+// Prevent compile-time errors caused by misconfiguration.
+#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
+  #pragma message("ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined")
+ #undef ASMJIT_NO_TEXT
+#endif
+
+#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
+ #pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
+ #undef ASMJIT_NO_INTROSPECTION
+#endif
+
+// ============================================================================
+// [asmjit::Dependencies]
+// ============================================================================
+
+// We really want std-types as globals.
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <new>
+#include <limits>
+#include <type_traits>
+#include <utility>
+
+#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
+ #include <pthread.h>
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Deprecated]
+// ============================================================================
+
+// DEPRECATED: Will be removed in the future.
+#if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
+ #if defined(ASMJIT_BUILD_EMBED)
+ #pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+ #if defined(ASMJIT_BUILD_STATIC)
+ #pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
+ #endif
+
+ #if !defined(ASMJIT_STATIC)
+ #define ASMJIT_STATIC
+ #endif
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Build Mode]
+// ============================================================================
+
+// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
+#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
+ #if !defined(NDEBUG)
+ #define ASMJIT_BUILD_DEBUG
+ #else
+ #define ASMJIT_BUILD_RELEASE
+ #endif
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Target Architecture]
+// ============================================================================
+
+#if defined(_M_X64) || defined(__x86_64__)
+ #define ASMJIT_ARCH_X86 64
+#elif defined(_M_IX86) || defined(__X86__) || defined(__i386__)
+ #define ASMJIT_ARCH_X86 32
+#else
+ #define ASMJIT_ARCH_X86 0
+#endif
+
+#if defined(__arm64__) || defined(__aarch64__)
+# define ASMJIT_ARCH_ARM 64
+#elif defined(_M_ARM) || defined(_M_ARMT) || defined(__arm__) || defined(__thumb__) || defined(__thumb2__)
+ #define ASMJIT_ARCH_ARM 32
+#else
+ #define ASMJIT_ARCH_ARM 0
+#endif
+
+#if defined(_MIPS_ARCH_MIPS64) || defined(__mips64)
+ #define ASMJIT_ARCH_MIPS 64
+#elif defined(_MIPS_ARCH_MIPS32) || defined(_M_MRX000) || defined(__mips__)
+ #define ASMJIT_ARCH_MIPS 32
+#else
+ #define ASMJIT_ARCH_MIPS 0
+#endif
+
+#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS)
+#if ASMJIT_ARCH_BITS == 0
+ #undef ASMJIT_ARCH_BITS
+ #if defined (__LP64__) || defined(_LP64)
+ #define ASMJIT_ARCH_BITS 64
+ #else
+ #define ASMJIT_ARCH_BITS 32
+ #endif
+#endif
+
+#if (defined(__ARMEB__)) || \
+ (defined(__MIPSEB__)) || \
+ (defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
+ #define ASMJIT_ARCH_LE 0
+ #define ASMJIT_ARCH_BE 1
+#else
+ #define ASMJIT_ARCH_LE 1
+ #define ASMJIT_ARCH_BE 0
+#endif
+
+// Build host architecture if no architecture is selected.
+#if !defined(ASMJIT_BUILD_HOST) && \
+ !defined(ASMJIT_BUILD_X86) && \
+ !defined(ASMJIT_BUILD_ARM)
+ #define ASMJIT_BUILD_HOST
+#endif
+
+// Detect host architecture if building only for host.
+#if ASMJIT_ARCH_X86 && defined(ASMJIT_BUILD_HOST) && !defined(ASMJIT_BUILD_X86)
+ #define ASMJIT_BUILD_X86
+#endif
+
+#if ASMJIT_ARCH_ARM && defined(ASMJIT_BUILD_HOST) && !defined(ASMJIT_BUILD_ARM)
+ #define ASMJIT_BUILD_ARM
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - C++ Compiler and Features Detection]
+// ============================================================================
+
+#define ASMJIT_CXX_CLANG 0
+#define ASMJIT_CXX_GNU 0
+#define ASMJIT_CXX_INTEL 0
+#define ASMJIT_CXX_MSC 0
+#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR, PATCH) ((MAJOR) * 10000000 + (MINOR) * 100000 + (PATCH))
+
+// Intel Compiler [pretends to be GNU or MSC, so it must be checked first]:
+// - https://software.intel.com/en-us/articles/c0x-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c14-features-supported-by-intel-c-compiler
+// - https://software.intel.com/en-us/articles/c17-features-supported-by-intel-c-compiler
+#if defined(__INTEL_COMPILER)
+
+ #undef ASMJIT_CXX_INTEL
+ #define ASMJIT_CXX_INTEL ASMJIT_CXX_MAKE_VER(__INTEL_COMPILER / 100, (__INTEL_COMPILER / 10) % 10, __INTEL_COMPILER % 10)
+
+// MSC Compiler:
+// - https://msdn.microsoft.com/en-us/library/hh567368.aspx
+//
+// Version List:
+// - 16.00.0 == VS2010
+// - 17.00.0 == VS2012
+// - 18.00.0 == VS2013
+// - 19.00.0 == VS2015
+// - 19.10.0 == VS2017
+#elif defined(_MSC_VER) && defined(_MSC_FULL_VER)
+
+ #undef ASMJIT_CXX_MSC
+ #if _MSC_VER == _MSC_FULL_VER / 10000
+ #define ASMJIT_CXX_MSC ASMJIT_CXX_MAKE_VER(_MSC_VER / 100, _MSC_VER % 100, _MSC_FULL_VER % 10000)
+ #else
+ #define ASMJIT_CXX_MSC ASMJIT_CXX_MAKE_VER(_MSC_VER / 100, (_MSC_FULL_VER / 100000) % 100, _MSC_FULL_VER % 100000)
+ #endif
+
+// Clang Compiler [Pretends to be GNU, so it must be checked before GNU]:
+// - https://clang.llvm.org/cxx_status.html
+#elif defined(__clang_major__) && defined(__clang_minor__) && defined(__clang_patchlevel__)
+
+ #undef ASMJIT_CXX_CLANG
+ #define ASMJIT_CXX_CLANG ASMJIT_CXX_MAKE_VER(__clang_major__, __clang_minor__, __clang_patchlevel__)
+
+// GNU Compiler:
+// - https://gcc.gnu.org/projects/cxx-status.html
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+
+ #undef ASMJIT_CXX_GNU
+ #define ASMJIT_CXX_GNU ASMJIT_CXX_MAKE_VER(__GNUC__, __GNUC_MINOR__, __GNUC_PATCHLEVEL__)
+
+#endif
+
+// Compiler features detection macros.
+#if ASMJIT_CXX_CLANG && defined(__has_builtin)
+ #define ASMJIT_CXX_HAS_BUILTIN(NAME, CHECK) (__has_builtin(NAME))
+#else
+ #define ASMJIT_CXX_HAS_BUILTIN(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+#if ASMJIT_CXX_CLANG && defined(__has_extension)
+ #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (__has_extension(NAME))
+#elif ASMJIT_CXX_CLANG && defined(__has_feature)
+ #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (__has_feature(NAME))
+#else
+ #define ASMJIT_CXX_HAS_FEATURE(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+#if ASMJIT_CXX_CLANG && defined(__has_attribute)
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (__has_attribute(NAME))
+#else
+ #define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+#if ASMJIT_CXX_CLANG && defined(__has_cpp_attribute)
+ #define ASMJIT_CXX_HAS_CPP_ATTRIBUTE(NAME, CHECK) (__has_cpp_attribute(NAME))
+#else
+ #define ASMJIT_CXX_HAS_CPP_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
+#endif
+
+// Compiler features by vendor.
+#if defined(_MSC_VER) && !defined(_NATIVE_WCHAR_T_DEFINED)
+ #define ASMJIT_CXX_HAS_NATIVE_WCHAR_T 0
+#else
+ #define ASMJIT_CXX_HAS_NATIVE_WCHAR_T 1
+#endif
+
+#if ASMJIT_CXX_HAS_FEATURE(cxx_unicode_literals, ( \
+ (ASMJIT_CXX_INTEL >= ASMJIT_CXX_MAKE_VER(14, 0, 0)) || \
+ (ASMJIT_CXX_MSC >= ASMJIT_CXX_MAKE_VER(19, 0, 0)) || \
+ (ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4 , 5, 0) && __cplusplus >= 201103L) ))
+ #define ASMJIT_CXX_HAS_UNICODE_LITERALS 1
+#else
+ #define ASMJIT_CXX_HAS_UNICODE_LITERALS 0
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - API Decorators & Language Extensions]
+// ============================================================================
+
+// API (Export / Import).
+#if !defined(ASMJIT_STATIC)
+ #if defined(_WIN32) && (defined(_MSC_VER) || defined(__MINGW32__))
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __declspec(dllexport)
+ #else
+ #define ASMJIT_API __declspec(dllimport)
+ #endif
+ #elif defined(_WIN32) && defined(__GNUC__)
+ #ifdef ASMJIT_EXPORTS
+ #define ASMJIT_API __attribute__((__dllexport__))
+ #else
+ #define ASMJIT_API __attribute__((__dllimport__))
+ #endif
+ #elif defined(__GNUC__)
+ #define ASMJIT_API __attribute__((__visibility__("default")))
+ #endif
+#endif
+
+#if !defined(ASMJIT_API)
+ #define ASMJIT_API
+#endif
+
+#if !defined(ASMJIT_VARAPI)
+ #define ASMJIT_VARAPI extern ASMJIT_API
+#endif
+
+// This is basically a workaround. When using MSVC and marking a class as DLL
+// export, everything gets exported, which is unwanted in most projects. MSVC
+// automatically exports typeinfo and vtable if at least one symbol of the
+// class is exported. GCC, however, behaves differently: even if one or more
+// symbols are exported it doesn't export typeinfo unless the class itself
+// is decorated with "visibility(default)" (i.e. ASMJIT_API).
+#if !defined(_WIN32) && defined(__GNUC__)
+ #define ASMJIT_VIRTAPI ASMJIT_API
+#else
+ #define ASMJIT_VIRTAPI
+#endif
+
+// Function attributes.
+#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
+ #define ASMJIT_INLINE inline __attribute__((__always_inline__))
+#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
+ #define ASMJIT_INLINE __forceinline
+#else
+ #define ASMJIT_INLINE inline
+#endif
+
+#if defined(__GNUC__)
+ #define ASMJIT_NOINLINE __attribute__((__noinline__))
+ #define ASMJIT_NORETURN __attribute__((__noreturn__))
+#elif defined(_MSC_VER)
+ #define ASMJIT_NOINLINE __declspec(noinline)
+ #define ASMJIT_NORETURN __declspec(noreturn)
+#else
+ #define ASMJIT_NOINLINE
+ #define ASMJIT_NORETURN
+#endif
+
+// Calling conventions.
+#if ASMJIT_ARCH_X86 == 32 && defined(__GNUC__)
+ #define ASMJIT_CDECL __attribute__((__cdecl__))
+ #define ASMJIT_STDCALL __attribute__((__stdcall__))
+ #define ASMJIT_FASTCALL __attribute__((__fastcall__))
+ #define ASMJIT_REGPARM(N) __attribute__((__regparm__(N)))
+#elif ASMJIT_ARCH_X86 == 32 && defined(_MSC_VER)
+ #define ASMJIT_CDECL __cdecl
+ #define ASMJIT_STDCALL __stdcall
+ #define ASMJIT_FASTCALL __fastcall
+ #define ASMJIT_REGPARM(N)
+#else
+ #define ASMJIT_CDECL
+ #define ASMJIT_STDCALL
+ #define ASMJIT_FASTCALL
+ #define ASMJIT_REGPARM(N)
+#endif
+
+// Type alignment (not allowed by C++11 'alignas' keyword).
+#if defined(__GNUC__)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __attribute__((__aligned__(N))) TYPE
+#elif defined(_MSC_VER)
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) __declspec(align(N)) TYPE
+#else
+ #define ASMJIT_ALIGN_TYPE(TYPE, N) TYPE
+#endif
+
+#if defined(__GNUC__)
+ #define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
+#else
+ #define ASMJIT_MAY_ALIAS
+#endif
+
+// Annotations.
+#if defined(__GNUC__)
+ #define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
+ #define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
+#else
+ #define ASMJIT_LIKELY(...) (__VA_ARGS__)
+ #define ASMJIT_UNLIKELY(...) (__VA_ARGS__)
+#endif
+
+#if defined(__clang__) && __cplusplus >= 201103L
+ #define ASMJIT_FALLTHROUGH [[clang::fallthrough]]
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(7, 0, 0)
+ #define ASMJIT_FALLTHROUGH __attribute__((__fallthrough__))
+#else
+ #define ASMJIT_FALLTHROUGH ((void)0) /* fallthrough */
+#endif
+
+// Utilities.
+#define ASMJIT_OFFSET_OF(STRUCT, MEMBER) ((int)(intptr_t)((const char*)&((const STRUCT*)0x100)->MEMBER) - 0x100)
+#define ASMJIT_ARRAY_SIZE(X) uint32_t(sizeof(X) / sizeof(X[0]))
+
+#if ASMJIT_CXX_HAS_ATTRIBUTE(attribute_deprecated_with_message, ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 5, 0))
+ #define ASMJIT_DEPRECATED(DECL, MESSAGE) DECL __attribute__((__deprecated__(MESSAGE)))
+#elif ASMJIT_CXX_MSC
+ #define ASMJIT_DEPRECATED(DECL, MESSAGE) __declspec(deprecated(MESSAGE)) DECL
+#else
+ #define ASMJIT_DEPRECATED(DECL, MESSAGE) DECL
+#endif
+
+#if ASMJIT_CXX_HAS_ATTRIBUTE(no_sanitize, 0)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize__("undefined")))
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 9, 0)
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF __attribute__((__no_sanitize_undefined__))
+#else
+ #define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
+#endif
+
+// ============================================================================
+// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
+// ============================================================================
+
+#if defined(__clang__)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
+ _Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("clang diagnostic pop") \
+ }
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(4, 0, 0) && \
+ ASMJIT_CXX_GNU < ASMJIT_CXX_MAKE_VER(5, 0, 0)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif ASMJIT_CXX_GNU >= ASMJIT_CXX_MAKE_VER(8, 0, 0)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
+ #define ASMJIT_END_NAMESPACE \
+ _Pragma("GCC diagnostic pop") \
+ }
+#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+ #define ASMJIT_BEGIN_NAMESPACE \
+ namespace asmjit { \
+ __pragma(warning(push)) \
+    __pragma(warning(disable: 4127)) /* conditional expression is constant */ \
+ __pragma(warning(disable: 4201)) /* nameless struct/union */
+ #define ASMJIT_END_NAMESPACE \
+ __pragma(warning(pop)) \
+ }
+#endif
+
+#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
+ #define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
+ #define ASMJIT_END_NAMESPACE }
+#endif
+
+#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
+ ASMJIT_BEGIN_NAMESPACE \
+ namespace NAMESPACE {
+
+#define ASMJIT_END_SUB_NAMESPACE \
+ } \
+ ASMJIT_END_NAMESPACE
+
+// ============================================================================
+// [asmjit::Build - Globals - Utilities]
+// ============================================================================
+
+#define ASMJIT_NONCOPYABLE(...) \
+ private: \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
+
+#define ASMJIT_NONCONSTRUCTIBLE(...) \
+ private: \
+ __VA_ARGS__() = delete; \
+ __VA_ARGS__(const __VA_ARGS__& other) = delete; \
+ __VA_ARGS__& operator=(const __VA_ARGS__& other) = delete; \
+ public:
+
+// ============================================================================
+// [asmjit::Build - Globals - Cleanup]
+// ============================================================================
+
+// Try to clean up things not used in other public headers.
+#ifndef ASMJIT_EXPORTS
+ #undef ASMJIT_CXX_CLANG
+ #undef ASMJIT_CXX_GNU
+ #undef ASMJIT_CXX_INTEL
+ #undef ASMJIT_CXX_MSC
+ #undef ASMJIT_CXX_MAKE_VER
+#endif
+
+#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED
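The two version macros above use different packings: ASMJIT_LIBRARY_VERSION is
byte-packed hex (0x010200 == 1.2.0), while ASMJIT_CXX_MAKE_VER packs decimally
so versions compare as plain integers. A compile-time sketch; note it is only
valid while ASMJIT_EXPORTS is defined, because the cleanup block above #undefs
ASMJIT_CXX_MAKE_VER for public consumers:

    static_assert((ASMJIT_LIBRARY_VERSION >> 16) == 1, "major");
    static_assert(((ASMJIT_LIBRARY_VERSION >> 8) & 0xFF) == 2, "minor");
    static_assert((ASMJIT_LIBRARY_VERSION & 0xFF) == 0, "patch");

    // MAJOR*10000000 + MINOR*100000 + PATCH:
    static_assert(ASMJIT_CXX_MAKE_VER(4, 9, 0) == 40900000, "packing");
    static_assert(ASMJIT_CXX_MAKE_VER(5, 1, 0) > ASMJIT_CXX_MAKE_VER(4, 9, 0), "ordering");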
diff --git a/3rdparty/asmjit/src/asmjit/core/arch.cpp b/3rdparty/asmjit/src/asmjit/core/arch.cpp
new file mode 100644
index 00000000000..97fca9d5c47
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/arch.cpp
@@ -0,0 +1,176 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86operand.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armoperand.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ArchInfo]
+// ============================================================================
+
+// NOTE: Keep `const constexpr` otherwise MSC would not compile this code correctly.
+static const constexpr uint32_t archInfoTable[] = {
+ // <--------------------+---------------------+-------------------+-------+
+ // | Type | SubType | GPInfo|
+ // <--------------------+---------------------+-------------------+-------+
+ Support::bytepack32_4x8(ArchInfo::kIdNone , ArchInfo::kSubIdNone, 0, 0),
+ Support::bytepack32_4x8(ArchInfo::kIdX86 , ArchInfo::kSubIdNone, 4, 8),
+ Support::bytepack32_4x8(ArchInfo::kIdX64 , ArchInfo::kSubIdNone, 8, 16),
+ Support::bytepack32_4x8(ArchInfo::kIdA32 , ArchInfo::kSubIdNone, 4, 16),
+ Support::bytepack32_4x8(ArchInfo::kIdA64 , ArchInfo::kSubIdNone, 8, 32)
+};
+
+ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t id, uint32_t subId) noexcept {
+ uint32_t index = id < ASMJIT_ARRAY_SIZE(archInfoTable) ? id : uint32_t(0);
+
+ // Make sure the `archInfoTable` array is correctly indexed.
+ _signature = archInfoTable[index];
+ ASMJIT_ASSERT(_id == index);
+
+  // Even if the architecture is not known we set up its id and sub-id;
+  // however, such an architecture is not really useful.
+ _id = uint8_t(id);
+ _subId = uint8_t(subId);
+}
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t archId, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept {
+ uint32_t typeId = typeIdInOut;
+
+  // Zero the signature so it's clear in case that typeId is invalid.
+ regInfo._signature = 0;
+
+ // TODO: Move to X86 backend.
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId)) {
+ // Passed RegType instead of TypeId?
+ if (typeId <= BaseReg::kTypeMax)
+ typeId = x86::opData.archRegs.regTypeToTypeId[typeId];
+
+ if (ASMJIT_UNLIKELY(!Type::isValid(typeId)))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ // First normalize architecture dependent types.
+ if (Type::isAbstract(typeId)) {
+ if (typeId == Type::kIdIntPtr)
+ typeId = (archId == ArchInfo::kIdX86) ? Type::kIdI32 : Type::kIdI64;
+ else
+ typeId = (archId == ArchInfo::kIdX86) ? Type::kIdU32 : Type::kIdU64;
+ }
+
+    // Type size helps to construct all groups of registers. If the size is zero
+ // then the TypeId is invalid.
+ uint32_t size = Type::sizeOf(typeId);
+ if (ASMJIT_UNLIKELY(!size))
+ return DebugUtils::errored(kErrorInvalidTypeId);
+
+ if (ASMJIT_UNLIKELY(typeId == Type::kIdF80))
+ return DebugUtils::errored(kErrorInvalidUseOfF80);
+
+ uint32_t regType = 0;
+
+ switch (typeId) {
+ case Type::kIdI8:
+ case Type::kIdU8:
+ regType = x86::Reg::kTypeGpbLo;
+ break;
+
+ case Type::kIdI16:
+ case Type::kIdU16:
+ regType = x86::Reg::kTypeGpw;
+ break;
+
+ case Type::kIdI32:
+ case Type::kIdU32:
+ regType = x86::Reg::kTypeGpd;
+ break;
+
+ case Type::kIdI64:
+ case Type::kIdU64:
+ if (archId == ArchInfo::kIdX86)
+ return DebugUtils::errored(kErrorInvalidUseOfGpq);
+
+ regType = x86::Reg::kTypeGpq;
+ break;
+
+ // F32 and F64 are always promoted to use vector registers.
+ case Type::kIdF32:
+ typeId = Type::kIdF32x1;
+ regType = x86::Reg::kTypeXmm;
+ break;
+
+ case Type::kIdF64:
+ typeId = Type::kIdF64x1;
+ regType = x86::Reg::kTypeXmm;
+ break;
+
+ // Mask registers {k}.
+ case Type::kIdMask8:
+ case Type::kIdMask16:
+ case Type::kIdMask32:
+ case Type::kIdMask64:
+ regType = x86::Reg::kTypeKReg;
+ break;
+
+ // MMX registers.
+ case Type::kIdMmx32:
+ case Type::kIdMmx64:
+ regType = x86::Reg::kTypeMm;
+ break;
+
+ // XMM|YMM|ZMM registers.
+ default:
+ if (size <= 16)
+ regType = x86::Reg::kTypeXmm;
+ else if (size == 32)
+ regType = x86::Reg::kTypeYmm;
+ else
+ regType = x86::Reg::kTypeZmm;
+ break;
+ }
+
+ typeIdInOut = typeId;
+ regInfo._signature = x86::opData.archRegs.regInfo[regType].signature();
+ return kErrorOk;
+ }
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_END_NAMESPACE
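A usage sketch for the mapping implemented above: querying the register info
for a 32-bit float on X64 (assuming the X86 backend is compiled in):

    uint32_t typeId = asmjit::Type::kIdF32;
    asmjit::RegInfo regInfo;
    asmjit::Error err = asmjit::ArchUtils::typeIdToRegInfo(
        asmjit::ArchInfo::kIdX64, typeId, regInfo);
    // On success `typeId` is normalized to Type::kIdF32x1 and `regInfo`
    // describes an XMM register, per the switch above.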
diff --git a/3rdparty/asmjit/src/asmjit/core/arch.h b/3rdparty/asmjit/src/asmjit/core/arch.h
new file mode 100644
index 00000000000..b0a27fd9cd5
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/arch.h
@@ -0,0 +1,204 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ARCH_H_INCLUDED
+#define ASMJIT_CORE_ARCH_H_INCLUDED
+
+#include "../core/globals.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::ArchInfo]
+// ============================================================================
+
+class ArchInfo {
+public:
+ union {
+ struct {
+ //! Architecture id.
+ uint8_t _id;
+ //! Architecture sub-id.
+ uint8_t _subId;
+ //! Default size of a general purpose register.
+ uint8_t _gpSize;
+ //! Count of all general purpose registers.
+ uint8_t _gpCount;
+ };
+ //! Architecture signature (32-bit int).
+ uint32_t _signature;
+ };
+
+ //! Architecture id.
+ enum Id : uint32_t {
+ kIdNone = 0, //!< No/Unknown architecture.
+
+ // X86 architectures.
+ kIdX86 = 1, //!< X86 architecture (32-bit).
+ kIdX64 = 2, //!< X64 architecture (64-bit) (AMD64).
+
+ // ARM architectures.
+ kIdA32 = 3, //!< ARM 32-bit architecture (AArch32/ARM/THUMB).
+ kIdA64 = 4, //!< ARM 64-bit architecture (AArch64).
+
+ //! Architecture detected at compile-time (architecture of the host).
+ kIdHost = ASMJIT_ARCH_X86 == 32 ? kIdX86 :
+ ASMJIT_ARCH_X86 == 64 ? kIdX64 :
+ ASMJIT_ARCH_ARM == 32 ? kIdA32 :
+ ASMJIT_ARCH_ARM == 64 ? kIdA64 : kIdNone
+ };
+
+ //! Architecture sub-type or execution mode.
+ enum SubType : uint32_t {
+ kSubIdNone = 0, //!< Default mode (or no specific mode).
+
+ // X86 sub-types.
+ kSubIdX86_AVX = 1, //!< Code generation uses AVX by default (VEC instructions).
+ kSubIdX86_AVX2 = 2, //!< Code generation uses AVX2 by default (VEC instructions).
+ kSubIdX86_AVX512 = 3, //!< Code generation uses AVX-512F by default (+32 vector regs).
+ kSubIdX86_AVX512VL = 4, //!< Code generation uses AVX-512F-VL by default (+VL extensions).
+
+ // ARM sub-types.
+ kSubIdA32_Thumb = 8, //!< THUMB|THUMBv2 sub-type (only ARM in 32-bit mode).
+
+#if (ASMJIT_ARCH_X86) && defined(__AVX512VL__)
+ kSubIdHost = kSubIdX86_AVX512VL
+#elif (ASMJIT_ARCH_X86) && defined(__AVX512F__)
+ kSubIdHost = kSubIdX86_AVX512
+#elif (ASMJIT_ARCH_X86) && defined(__AVX2__)
+ kSubIdHost = kSubIdX86_AVX2
+#elif (ASMJIT_ARCH_X86) && defined(__AVX__)
+ kSubIdHost = kSubIdX86_AVX
+#elif (ASMJIT_ARCH_ARM == 32) && (defined(_M_ARMT) || defined(__thumb__) || defined(__thumb2__))
+ kSubIdHost = kSubIdA32_Thumb
+#else
+ kSubIdHost = 0
+#endif
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ArchInfo() noexcept : _signature(0) {}
+ inline ArchInfo(const ArchInfo& other) noexcept : _signature(other._signature) {}
+ inline explicit ArchInfo(uint32_t type, uint32_t subType = kSubIdNone) noexcept { init(type, subType); }
+ inline explicit ArchInfo(Globals::NoInit_) noexcept {}
+
+ inline static ArchInfo host() noexcept { return ArchInfo(kIdHost, kSubIdHost); }
+
+ inline bool isInitialized() const noexcept { return _id != kIdNone; }
+
+ ASMJIT_API void init(uint32_t type, uint32_t subType = kSubIdNone) noexcept;
+ inline void reset() noexcept { _signature = 0; }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline ArchInfo& operator=(const ArchInfo& other) noexcept = default;
+
+ inline bool operator==(const ArchInfo& other) const noexcept { return _signature == other._signature; }
+ inline bool operator!=(const ArchInfo& other) const noexcept { return _signature != other._signature; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the architecture id, see `Id`.
+ inline uint32_t archId() const noexcept { return _id; }
+
+ //! Returns the architecture sub-id, see `SubType`.
+ //!
+ //! X86 & X64
+ //! ---------
+ //!
+  //! Architecture subtype describes the highest instruction-set level that
+  //! can be used.
+ //!
+ //! A32 & A64
+ //! ---------
+ //!
+  //! Architecture mode selects the instruction encoding used when generating
+  //! machine code; thus the mode can be used to force generation of THUMB and
+  //! THUMBv2 encoding or regular ARM encoding.
+ inline uint32_t archSubId() const noexcept { return _subId; }
+
+ //! Tests whether this architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return _gpSize == 4; }
+ //! Tests whether this architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return _gpSize == 8; }
+
+ //! Tests whether this architecture is X86, X64.
+ inline bool isX86Family() const noexcept { return isX86Family(_id); }
+ //! Tests whether this architecture is ARM32 or ARM64.
+ inline bool isArmFamily() const noexcept { return isArmFamily(_id); }
+
+ //! Returns the native size of a general-purpose register.
+ inline uint32_t gpSize() const noexcept { return _gpSize; }
+ //! Returns number of general-purpose registers.
+ inline uint32_t gpCount() const noexcept { return _gpCount; }
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ static inline bool isX86Family(uint32_t archId) noexcept { return archId >= kIdX86 && archId <= kIdX64; }
+ static inline bool isArmFamily(uint32_t archId) noexcept { return archId >= kIdA32 && archId <= kIdA64; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ArchRegs]
+// ============================================================================
+
+//! Information about all architecture registers.
+struct ArchRegs {
+ //! Register information and signatures indexed by `BaseReg::RegType`.
+ RegInfo regInfo[BaseReg::kTypeMax + 1];
+ //! Count (maximum) of registers per `BaseReg::RegType`.
+ uint8_t regCount[BaseReg::kTypeMax + 1];
+ //! Converts RegType to TypeId, see `Type::Id`.
+ uint8_t regTypeToTypeId[BaseReg::kTypeMax + 1];
+};
+
+// ============================================================================
+// [asmjit::ArchUtils]
+// ============================================================================
+
+struct ArchUtils {
+ ASMJIT_API static Error typeIdToRegInfo(uint32_t archId, uint32_t& typeIdInOut, RegInfo& regInfo) noexcept;
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ARCH_H_INCLUDED
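A short sketch of the accessors declared above, using the compile-time host
detection:

    asmjit::ArchInfo arch = asmjit::ArchInfo::host();
    bool is64 = arch.is64Bit();        // _gpSize == 8 on X64/A64 hosts.
    uint32_t gpCount = arch.gpCount(); // 16 on X64, 32 on A64, per the
                                       // archInfoTable in arch.cpp.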
diff --git a/3rdparty/asmjit/src/asmjit/core/assembler.cpp b/3rdparty/asmjit/src/asmjit/core/assembler.cpp
new file mode 100644
index 00000000000..35c39ab6089
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/assembler.cpp
@@ -0,0 +1,514 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/codebufferwriter_p.h"
+#include "../core/constpool.h"
+#include "../core/logging.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseAssembler - Construction / Destruction]
+// ============================================================================
+
+BaseAssembler::BaseAssembler() noexcept
+ : BaseEmitter(kTypeAssembler),
+ _section(nullptr),
+ _bufferData(nullptr),
+ _bufferEnd(nullptr),
+ _bufferPtr(nullptr),
+ _op4(),
+ _op5() {}
+BaseAssembler::~BaseAssembler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Buffer Management]
+// ============================================================================
+
+Error BaseAssembler::setOffset(size_t offset) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
+ if (ASMJIT_UNLIKELY(offset > size))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ _bufferPtr = _bufferData + offset;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+static void BaseAssembler_logLabel(BaseAssembler* self, const Label& label) noexcept {
+ Logger* logger = self->_code->_logger;
+
+ StringTmp<512> sb;
+ size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : std::numeric_limits<size_t>::max();
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
+ Logging::formatLabel(sb, logger->flags(), self, label.id());
+ sb.appendChar(':');
+ Logging::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
+ logger->log(sb.data(), sb.size());
+}
+#endif
+
+// ============================================================================
+// [asmjit::BaseAssembler - Section Management]
+// ============================================================================
+
+static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
+ uint8_t* p = section->_buffer._data;
+
+ self->_section = section;
+ self->_bufferData = p;
+ self->_bufferPtr = p + section->_buffer._size;
+ self->_bufferEnd = p + section->_buffer._capacity;
+}
+
+Error BaseAssembler::section(Section* section) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return reportError(DebugUtils::errored(kErrorNotInitialized));
+
+ if (!_code->isSectionValid(section->id()) || _code->_sections[section->id()] != section)
+ return reportError(DebugUtils::errored(kErrorInvalidSection));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (hasEmitterOption(kOptionLoggingEnabled))
+ _code->_logger->logf(".section %s {#%u}\n", section->name(), section->id());
+#endif
+
+ BaseAssembler_initSection(this, section);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Label Management]
+// ============================================================================
+
+Label BaseAssembler::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newLabelEntry(&le);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
+Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ if (ASMJIT_LIKELY(_code)) {
+ LabelEntry* le;
+ Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ labelId = le->id();
+ }
+ return Label(labelId);
+}
+
+Error BaseAssembler::bind(const Label& label) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ Error err = _code->bindLabel(label, _section->id(), offset());
+
+#ifndef ASMJIT_NO_LOGGING
+ if (hasEmitterOption(kOptionLoggingEnabled))
+ BaseAssembler_logLabel(this, label);
+#endif
+
+ resetInlineComment();
+ if (err)
+ return reportError(err);
+
+ return kErrorOk;
+}
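+
+// A minimal usage sketch (not part of the library): labels are created by
+// `newLabel()`, can be referenced before or after they are bound, and must be
+// bound exactly once. The `x86::Assembler` and the loop below are
+// illustrative assumptions only.
+//
+//   x86::Assembler a(&code);
+//   Label loop = a.newLabel();
+//   a.bind(loop);                      // Bind `loop` to the current offset.
+//   a.dec(x86::ecx);
+//   a.jnz(loop);                       // Back-reference to the bound label.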
+
+// ============================================================================
+// [asmjit::BaseAssembler - Emit (Low-Level)]
+// ============================================================================
+
+Error BaseAssembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
+ _op4 = o4;
+ _op5 = o5;
+ _instOptions |= BaseInst::kOptionOp4Op5Used;
+ return _emit(instId, o0, o1, o2, o3);
+}
+
+Error BaseAssembler::_emitOpArray(uint32_t instId, const Operand_* operands, size_t count) {
+ const Operand_* o0 = &operands[0];
+ const Operand_* o1 = &operands[1];
+ const Operand_* o2 = &operands[2];
+ const Operand_* o3 = &operands[3];
+
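+  // Each case below intentionally falls through: starting at `count`, every
+  // operand the caller did not provide is redirected to `Globals::none`.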
+ switch (count) {
+ case 0: o0 = &Globals::none; ASMJIT_FALLTHROUGH;
+ case 1: o1 = &Globals::none; ASMJIT_FALLTHROUGH;
+ case 2: o2 = &Globals::none; ASMJIT_FALLTHROUGH;
+ case 3: o3 = &Globals::none; ASMJIT_FALLTHROUGH;
+ case 4:
+ return _emit(instId, *o0, *o1, *o2, *o3);
+
+ case 5:
+ _op4 = operands[4];
+ _op5.reset();
+ _instOptions |= BaseInst::kOptionOp4Op5Used;
+ return _emit(instId, *o0, *o1, *o2, *o3);
+
+ case 6:
+ _op4 = operands[4];
+ _op5 = operands[5];
+ _instOptions |= BaseInst::kOptionOp4Op5Used;
+ return _emit(instId, *o0, *o1, *o2, *o3);
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+}
+
+#ifndef ASMJIT_NO_LOGGING
+void BaseAssembler::_emitLog(
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
+
+ Logger* logger = _code->logger();
+ ASMJIT_ASSERT(logger != nullptr);
+ ASMJIT_ASSERT(options & BaseEmitter::kOptionLoggingEnabled);
+
+ StringTmp<256> sb;
+ uint32_t flags = logger->flags();
+
+ uint8_t* beforeCursor = _bufferPtr;
+ intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
+
+ Operand_ operands[Globals::kMaxOpCount];
+ operands[0].copyFrom(o0);
+ operands[1].copyFrom(o1);
+ operands[2].copyFrom(o2);
+ operands[3].copyFrom(o3);
+
+ if (options & BaseInst::kOptionOp4Op5Used) {
+ operands[4].copyFrom(_op4);
+ operands[5].copyFrom(_op5);
+ }
+ else {
+ operands[4].reset();
+ operands[5].reset();
+ }
+
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
+ Logging::formatInstruction(sb, flags, this, archId(), BaseInst(instId, options, _extraReg), operands, Globals::kMaxOpCount);
+
+ if ((flags & FormatOptions::kFlagMachineCode) != 0)
+ Logging::formatLine(sb, _bufferPtr, size_t(emittedSize), relSize, immSize, inlineComment());
+ else
+ Logging::formatLine(sb, nullptr, std::numeric_limits<size_t>::max(), 0, 0, inlineComment());
+ logger->log(sb);
+}
+
+Error BaseAssembler::_emitFailed(
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+
+ StringTmp<256> sb;
+ sb.appendString(DebugUtils::errorAsString(err));
+ sb.appendString(": ");
+
+ Operand_ operands[Globals::kMaxOpCount];
+ operands[0].copyFrom(o0);
+ operands[1].copyFrom(o1);
+ operands[2].copyFrom(o2);
+ operands[3].copyFrom(o3);
+
+ if (options & BaseInst::kOptionOp4Op5Used) {
+ operands[4].copyFrom(_op4);
+ operands[5].copyFrom(_op5);
+ }
+ else {
+ operands[4].reset();
+ operands[5].reset();
+ }
+
+ Logging::formatInstruction(sb, 0, this, archId(), BaseInst(instId, options, _extraReg), operands, Globals::kMaxOpCount);
+
+ if (inlineComment()) {
+ sb.appendString(" ; ");
+ sb.appendString(inlineComment());
+ }
+
+ resetInstOptions();
+ resetExtraReg();
+ resetInlineComment();
+ return reportError(err, sb.data());
+}
+#endif
+
+// ============================================================================
+// [asmjit::BaseAssembler - Embed]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+struct DataSizeByPower {
+ char str[4];
+};
+
+static const DataSizeByPower dataSizeByPowerTable[] = {
+ { "db" },
+ { "dw" },
+ { "dd" },
+ { "dq" }
+};
+#endif
+
+Error BaseAssembler::embed(const void* data, uint32_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (dataSize == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+ writer.emitData(data, dataSize);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled)))
+ _code->_logger->logBinary(data, dataSize);
+#endif
+
+ writer.done(this);
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedLabel(const Label& label) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ ASMJIT_ASSERT(_code != nullptr);
+ RelocEntry* re;
+ LabelEntry* le = _code->labelEntry(label);
+
+ if (ASMJIT_UNLIKELY(!le))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ uint32_t dataSize = gpSize();
+ ASMJIT_ASSERT(dataSize <= 8);
+
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) {
+ StringTmp<256> sb;
+ sb.appendFormat(".%s ", dataSizeByPowerTable[Support::ctz(dataSize)].str);
+ Logging::formatLabel(sb, 0, this, label.id());
+ sb.appendChar('\n');
+ _code->_logger->log(sb);
+ }
+#endif
+
+ // TODO: Does it make sense to calculate the address here if everything is known?
+ /*
+ if (_code->hasBaseAddress() && currentSection() == _code->textSection() && le->isBound()) {
+ uint64_t addr = _code->baseAddress() + _code->textSection()->offset() + le->offset();
+ writer.emitValueLE(addr, dataSize);
+ }
+ */
+
+ Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, dataSize);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+
+ if (le->isBound()) {
+ re->_targetSectionId = le->section()->id();
+ re->_payload = le->offset();
+ }
+ else {
+ LabelLink* link = _code->newLabelLink(le, _section->id(), offset(), 0);
+ if (ASMJIT_UNLIKELY(!link))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ link->relocId = re->id();
+ }
+
+ // Emit dummy DWORD/QWORD depending on the data size.
+ writer.emitZeros(dataSize);
+ writer.done(this);
+
+ return kErrorOk;
+}
+
+Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ LabelEntry* labelEntry = _code->labelEntry(label);
+ LabelEntry* baseEntry = _code->labelEntry(base);
+
+ if (ASMJIT_UNLIKELY(!labelEntry || !baseEntry))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ if (dataSize == 0)
+ dataSize = gpSize();
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(dataSize) || dataSize > 8))
+ return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
+
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
+
+#ifndef ASMJIT_NO_LOGGING
+ if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled))) {
+ StringTmp<256> sb;
+ sb.appendFormat(".%s (", dataSizeByPowerTable[Support::ctz(dataSize)].str);
+ Logging::formatLabel(sb, 0, this, label.id());
+ sb.appendString(" - ");
+ Logging::formatLabel(sb, 0, this, base.id());
+ sb.appendString(")\n");
+ _code->_logger->log(sb);
+ }
+#endif
+
+ // If both labels are bound within the same section it means the delta can be calculated now.
+ if (labelEntry->isBound() && baseEntry->isBound() && labelEntry->section() == baseEntry->section()) {
+ uint64_t delta = labelEntry->offset() - baseEntry->offset();
+ writer.emitValueLE(delta, dataSize);
+ }
+ else {
+ RelocEntry* re;
+ Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression, dataSize);
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ Expression* exp = _code->_zone.newT<Expression>();
+ if (ASMJIT_UNLIKELY(!exp))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ exp->reset();
+ exp->opType = Expression::kOpSub;
+ exp->setValueAsLabel(0, labelEntry);
+ exp->setValueAsLabel(1, baseEntry);
+
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_payload = (uint64_t)(uintptr_t)exp;
+
+ writer.emitZeros(dataSize);
+ }
+
+ writer.done(this);
+ return kErrorOk;
+}
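+
+// A minimal usage sketch (not part of the library): embedding the distance
+// between two labels, e.g. to store the size of a block of code. The labels
+// and the 4-byte width below are illustrative assumptions only.
+//
+//   Label begin = a.newLabel();
+//   Label end = a.newLabel();
+//   a.bind(begin);
+//   // ... emit the code to be measured ...
+//   a.bind(end);
+//   a.embedLabelDelta(end, begin, 4);  // Emits uint32_t(end - begin).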
+
+Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!isLabelValid(label)))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ size_t size = pool.size();
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
+
+ pool.fill(writer.cursor());
+
+#ifndef ASMJIT_NO_LOGGING
+ if (ASMJIT_UNLIKELY(hasEmitterOption(kOptionLoggingEnabled)))
+ _code->_logger->logBinary(writer.cursor(), size);
+#endif
+
+ writer.advance(size);
+ writer.done(this);
+
+ return kErrorOk;
+}
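+
+// A minimal usage sketch (not part of the library): a `ConstPool` gathers
+// deduplicated constants and `embedConstPool()` aligns, binds, and emits it.
+// The `Zone` block size and the constant below are illustrative assumptions.
+//
+//   Zone zone(1024);
+//   ConstPool pool(&zone);
+//
+//   uint64_t c = 0x0102030405060708u;
+//   size_t offsetInPool;
+//   pool.add(&c, 8, offsetInPool);     // Equal constants share one slot.
+//
+//   Label poolLabel = a.newLabel();
+//   a.embedConstPool(poolLabel, pool); // Align + bind + emit the pool data.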
+
+// ============================================================================
+// [asmjit::BaseAssembler - Comment]
+// ============================================================================
+
+Error BaseAssembler::comment(const char* data, size_t size) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (hasEmitterOption(kOptionLoggingEnabled)) {
+ Logger* logger = _code->logger();
+ logger->log(data, size);
+ logger->log("\n", 1);
+ return kErrorOk;
+ }
+#else
+ DebugUtils::unused(data, size);
+#endif
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseAssembler - Events]
+// ============================================================================
+
+Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ // Attach to the end of the .text section.
+ BaseAssembler_initSection(this, code->_sections[0]);
+
+ // And reset everything that is used temporarily.
+ _op4.reset();
+ _op5.reset();
+
+ return kErrorOk;
+}
+
+Error BaseAssembler::onDetach(CodeHolder* code) noexcept {
+ _section = nullptr;
+ _bufferData = nullptr;
+ _bufferEnd = nullptr;
+ _bufferPtr = nullptr;
+
+ _op4.reset();
+ _op5.reset();
+
+ return Base::onDetach(code);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/assembler.h b/3rdparty/asmjit/src/asmjit/core/assembler.h
new file mode 100644
index 00000000000..fd2c1c33c28
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/assembler.h
@@ -0,0 +1,176 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
+
+#include "../core/codeholder.h"
+#include "../core/datatypes.h"
+#include "../core/emitter.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::BaseAssembler]
+// ============================================================================
+
+//! Base encoder (assembler).
+class ASMJIT_VIRTAPI BaseAssembler : public BaseEmitter {
+public:
+ ASMJIT_NONCOPYABLE(BaseAssembler)
+ typedef BaseEmitter Base;
+
+ //! Current section where the assembling happens.
+ Section* _section;
+ //! Start of the CodeBuffer of the current section.
+ uint8_t* _bufferData;
+ //! End (first invalid byte) of the current section.
+ uint8_t* _bufferEnd;
+ //! Pointer in the CodeBuffer of the current section.
+ uint8_t* _bufferPtr;
+ //! 5th operand data, used only temporarily.
+ Operand_ _op4;
+ //! 6th operand data, used only temporarily.
+ Operand_ _op5;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseAssembler` instance.
+ ASMJIT_API BaseAssembler() noexcept;
+ //! Destroys the `BaseAssembler` instance.
+ ASMJIT_API virtual ~BaseAssembler() noexcept;
+
+ //! \}
+
+ //! \name Code-Buffer Management
+ //! \{
+
+ //! Returns the capacity of the current CodeBuffer.
+ inline size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
+ //! Returns the number of remaining bytes in the current CodeBuffer.
+ inline size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
+
+ //! Returns the current position in the CodeBuffer.
+ inline size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
+ //! Sets the current position in the CodeBuffer to `offset`.
+ //!
+  //! \note The `offset` cannot be outside of the buffer size (even if it's
+  //! within the buffer's capacity).
+ ASMJIT_API Error setOffset(size_t offset);
+
+ //! Returns the start of the CodeBuffer in the current section.
+ inline uint8_t* bufferData() const noexcept { return _bufferData; }
+ //! Returns the end (first invalid byte) in the current section.
+ inline uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
+ //! Returns the current pointer in the CodeBuffer in the current section.
+ inline uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
+
+ //! \}
+
+ //! \name Section Management
+ //! \{
+
+ inline Section* currentSection() const noexcept { return _section; }
+
+ ASMJIT_API Error section(Section* section) override;
+
+ //! \}
+
+ //! \name Label Management
+ //! \{
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+ ASMJIT_API Error bind(const Label& label) override;
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Emit
+ //! \{
+
+ using BaseEmitter::_emit;
+
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override;
+ ASMJIT_API Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t count) override;
+
+protected:
+#ifndef ASMJIT_NO_LOGGING
+ void _emitLog(
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3,
+ uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
+
+ Error _emitFailed(
+ Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
+#else
+ inline Error _emitFailed(
+    Error err,
+ uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+
+ DebugUtils::unused(instId, options, o0, o1, o2, o3);
+ resetInstOptions();
+ resetInlineComment();
+ return reportError(err);
+ }
+#endif
+public:
+ //! \}
+ //! \endcond
+
+ //! \name Embed
+ //! \{
+
+ ASMJIT_API Error embed(const void* data, uint32_t dataSize) override;
+ ASMJIT_API Error embedLabel(const Label& label) override;
+ ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ASSEMBLER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/builder.cpp b/3rdparty/asmjit/src/asmjit/core/builder.cpp
new file mode 100644
index 00000000000..35d7127481c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/builder.cpp
@@ -0,0 +1,1004 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/builder.h"
+#include "../core/logging.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::PostponedErrorHandler (Internal)]
+// ============================================================================
+
+//! Postponed error handler that never throws. Used as a temporary error
+//! handler to run passes. If an error occurs, the caller is notified and
+//! will call the real error handler, which can throw.
+class PostponedErrorHandler : public ErrorHandler {
+public:
+ void handleError(Error err, const char* message, BaseEmitter* origin) override {
+ DebugUtils::unused(err, origin);
+ _message.assignString(message);
+ }
+
+ StringTmp<128> _message;
+};
+
+// ============================================================================
+// [asmjit::BaseBuilder - Construction / Destruction]
+// ============================================================================
+
+BaseBuilder::BaseBuilder() noexcept
+ : BaseEmitter(kTypeBuilder),
+ _codeZone(32768 - Zone::kBlockOverhead),
+ _dataZone(16384 - Zone::kBlockOverhead),
+ _passZone(65536 - Zone::kBlockOverhead),
+ _allocator(&_codeZone),
+ _passes(),
+ _labelNodes(),
+ _cursor(nullptr),
+ _firstNode(nullptr),
+ _lastNode(nullptr),
+ _nodeFlags(0) {}
+BaseBuilder::~BaseBuilder() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Node Management]
+// ============================================================================
+
+LabelNode* BaseBuilder::newLabelNode() noexcept {
+ LabelNode* node = newNodeT<LabelNode>();
+ if (!node || registerLabelNode(node) != kErrorOk)
+ return nullptr;
+ return node;
+}
+
+AlignNode* BaseBuilder::newAlignNode(uint32_t alignMode, uint32_t alignment) noexcept {
+ return newNodeT<AlignNode>(alignMode, alignment);
+}
+
+EmbedDataNode* BaseBuilder::newEmbedDataNode(const void* data, uint32_t size) noexcept {
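+  // Data that doesn't fit into the node's inline buffer is cloned into
+  // `_dataZone`; a null `data` pointer just reserves space that the caller
+  // fills later (see `embedConstPool()`).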
+ if (size > EmbedDataNode::kInlineBufferSize) {
+ void* cloned = _dataZone.alloc(size);
+ if (ASMJIT_UNLIKELY(!cloned))
+ return nullptr;
+
+ if (data)
+ memcpy(cloned, data, size);
+ data = cloned;
+ }
+
+ return newNodeT<EmbedDataNode>(const_cast<void*>(data), size);
+}
+
+ConstPoolNode* BaseBuilder::newConstPoolNode() noexcept {
+ ConstPoolNode* node = newNodeT<ConstPoolNode>();
+ if (!node || registerLabelNode(node) != kErrorOk)
+ return nullptr;
+ return node;
+}
+
+CommentNode* BaseBuilder::newCommentNode(const char* data, size_t size) noexcept {
+ if (data) {
+ if (size == SIZE_MAX)
+ size = strlen(data);
+
+ if (size > 0) {
+ data = static_cast<char*>(_dataZone.dup(data, size, true));
+ if (!data) return nullptr;
+ }
+ }
+
+ return newNodeT<CommentNode>(data);
+}
+
+InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0) noexcept {
+ uint32_t opCount = 1;
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+
+ node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+ node->setOp(0, o0);
+ node->resetOps(opCount, opCapacity);
+ return node;
+}
+
+InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1) noexcept {
+ uint32_t opCount = 2;
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+
+ node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->resetOps(opCount, opCapacity);
+ return node;
+}
+
+InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept {
+ uint32_t opCount = 3;
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+
+ node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOp(2, o2);
+ node->resetOps(opCount, opCapacity);
+ return node;
+}
+
+InstNode* BaseBuilder::newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept {
+ uint32_t opCount = 4;
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+
+ node = new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOp(2, o2);
+ node->setOp(3, o3);
+ node->resetOps(opCount, opCapacity);
+ return node;
+}
+
+InstNode* BaseBuilder::newInstNodeRaw(uint32_t instId, uint32_t instOptions, uint32_t opCount) noexcept {
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+ return new(node) InstNode(this, instId, instOptions, opCount, opCapacity);
+}
+
+BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+ ASMJIT_ASSERT(!node->isActive());
+
+ if (!_cursor) {
+ if (!_firstNode) {
+ _firstNode = node;
+ _lastNode = node;
+ }
+ else {
+ node->_next = _firstNode;
+ _firstNode->_prev = node;
+ _firstNode = node;
+ }
+ }
+ else {
+ BaseNode* prev = _cursor;
+ BaseNode* next = _cursor->next();
+
+ node->_prev = prev;
+ node->_next = next;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+ }
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ _cursor = node;
+ return node;
+}
+
+BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
+ ASMJIT_ASSERT(node);
+ ASMJIT_ASSERT(ref);
+
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+
+ BaseNode* prev = ref;
+ BaseNode* next = ref->next();
+
+ node->_prev = prev;
+ node->_next = next;
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ prev->_next = node;
+ if (next)
+ next->_prev = node;
+ else
+ _lastNode = node;
+
+ return node;
+}
+
+BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
+ ASMJIT_ASSERT(node != nullptr);
+ ASMJIT_ASSERT(!node->_prev);
+ ASMJIT_ASSERT(!node->_next);
+ ASMJIT_ASSERT(!node->isActive());
+ ASMJIT_ASSERT(ref != nullptr);
+ ASMJIT_ASSERT(ref->isActive());
+
+ BaseNode* prev = ref->prev();
+ BaseNode* next = ref;
+
+ node->_prev = prev;
+ node->_next = next;
+
+ node->addFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ next->_prev = node;
+ if (prev)
+ prev->_next = node;
+ else
+ _firstNode = node;
+
+ return node;
+}
+
+BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
+ if (!node->isActive())
+ return node;
+
+ BaseNode* prev = node->prev();
+ BaseNode* next = node->next();
+
+ if (_firstNode == node)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == node)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+ node->clearFlags(BaseNode::kFlagIsActive);
+ if (node->isSection())
+ _dirtySectionLinks = true;
+
+ if (_cursor == node)
+ _cursor = prev;
+
+ return node;
+}
+
+void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
+ if (first == last) {
+ removeNode(first);
+ return;
+ }
+
+ if (!first->isActive())
+ return;
+
+ BaseNode* prev = first->prev();
+ BaseNode* next = last->next();
+
+ if (_firstNode == first)
+ _firstNode = next;
+ else
+ prev->_next = next;
+
+ if (_lastNode == last)
+ _lastNode = prev;
+ else
+ next->_prev = prev;
+
+ BaseNode* node = first;
+ uint32_t didRemoveSection = false;
+
+ for (;;) {
+ next = node->next();
+ ASMJIT_ASSERT(next != nullptr);
+
+ node->_prev = nullptr;
+ node->_next = nullptr;
+ node->clearFlags(BaseNode::kFlagIsActive);
+ didRemoveSection |= uint32_t(node->isSection());
+
+ if (_cursor == node)
+ _cursor = prev;
+
+ if (node == last)
+ break;
+ node = next;
+ }
+
+ if (didRemoveSection)
+ _dirtySectionLinks = true;
+}
+
+BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
+ BaseNode* old = _cursor;
+ _cursor = node;
+ return old;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Section]
+// ============================================================================
+
+Error BaseBuilder::sectionNodeOf(SectionNode** pOut, uint32_t sectionId) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!_code->isSectionValid(sectionId)))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ if (sectionId >= _sectionNodes.size())
+ ASMJIT_PROPAGATE(_sectionNodes.resize(&_allocator, sectionId + 1));
+
+ SectionNode* node = _sectionNodes[sectionId];
+ if (!node) {
+ node = newNodeT<SectionNode>(sectionId);
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ _sectionNodes[sectionId] = node;
+ }
+
+ *pOut = node;
+ return kErrorOk;
+}
+
+Error BaseBuilder::section(Section* section) {
+ SectionNode* node;
+ Error err = sectionNodeOf(&node, section->id());
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ if (!node->isActive()) {
+ // Insert the section at the end if it was not part of the code.
+ addAfter(node, lastNode());
+ _cursor = node;
+ }
+ else {
+ // This is a bit tricky. We cache section links to make sure that
+    // switching sections doesn't involve traversing the linked list unless
+ // the position of the section has changed.
+ if (hasDirtySectionLinks())
+ updateSectionLinks();
+
+ if (node->_nextSection)
+ _cursor = node->_nextSection->_prev;
+ else
+ _cursor = _lastNode;
+ }
+
+ return kErrorOk;
+}
+
+void BaseBuilder::updateSectionLinks() noexcept {
+ if (!_dirtySectionLinks)
+ return;
+
+ BaseNode* node_ = _firstNode;
+ SectionNode* currentSection = nullptr;
+
+ while (node_) {
+ if (node_->isSection()) {
+ if (currentSection)
+ currentSection->_nextSection = node_->as<SectionNode>();
+ currentSection = node_->as<SectionNode>();
+ }
+ node_ = node_->next();
+ }
+
+ if (currentSection)
+ currentSection->_nextSection = nullptr;
+
+ _dirtySectionLinks = false;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Labels]
+// ============================================================================
+
+Error BaseBuilder::labelNodeOf(LabelNode** pOut, uint32_t labelId) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ uint32_t index = labelId;
+ if (ASMJIT_UNLIKELY(index >= _code->labelCount()))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+ if (index >= _labelNodes.size())
+ ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, index + 1));
+
+ LabelNode* node = _labelNodes[index];
+ if (!node) {
+ node = newNodeT<LabelNode>(labelId);
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ _labelNodes[index] = node;
+ }
+
+ *pOut = node;
+ return kErrorOk;
+}
+
+Error BaseBuilder::registerLabelNode(LabelNode* node) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+  // Don't call `reportError()` from here; this function is noexcept and is
+  // called by `newLabelNode()` and `newFuncNode()`, which are noexcept as well.
+ LabelEntry* le;
+ ASMJIT_PROPAGATE(_code->newLabelEntry(&le));
+ uint32_t labelId = le->id();
+
+  // We just added one label, so the assertion below must hold.
+ ASMJIT_ASSERT(_labelNodes.size() < labelId + 1);
+ ASMJIT_PROPAGATE(_labelNodes.resize(&_allocator, labelId + 1));
+
+ _labelNodes[labelId] = node;
+ node->_id = labelId;
+
+ return kErrorOk;
+}
+
+static Error BaseBuilder_newLabelInternal(BaseBuilder* self, uint32_t labelId) noexcept {
+ ASMJIT_ASSERT(self->_labelNodes.size() < labelId + 1);
+ LabelNode* node = self->newNodeT<LabelNode>(labelId);
+
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ ASMJIT_PROPAGATE(self->_labelNodes.resize(&self->_allocator, labelId + 1));
+ self->_labelNodes[labelId] = node;
+ node->_id = labelId;
+ return kErrorOk;
+}
+
+Label BaseBuilder::newLabel() {
+ uint32_t labelId = Globals::kInvalidId;
+ if (_code) {
+ LabelEntry* le;
+ Error err = _code->newLabelEntry(&le);
+ if (ASMJIT_UNLIKELY(err)) {
+ reportError(err);
+ }
+ else {
+ err = BaseBuilder_newLabelInternal(this, le->id());
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ }
+ return Label(labelId);
+}
+
+Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
+ uint32_t labelId = Globals::kInvalidId;
+ if (_code) {
+ LabelEntry* le;
+ Error err = _code->newNamedLabelEntry(&le, name, nameSize, type, parentId);
+ if (ASMJIT_UNLIKELY(err)) {
+ reportError(err);
+ }
+ else {
+ err = BaseBuilder_newLabelInternal(this, le->id());
+ if (ASMJIT_UNLIKELY(err))
+ reportError(err);
+ else
+ labelId = le->id();
+ }
+ }
+ return Label(labelId);
+}
+
+Error BaseBuilder::bind(const Label& label) {
+ LabelNode* node;
+ Error err = labelNodeOf(&node, label);
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Passes]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
+ for (Pass* pass : _passes)
+ if (strcmp(pass->name(), name) == 0)
+ return pass;
+ return nullptr;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::addPass(Pass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(pass == nullptr)) {
+    // Since this is directly called by `addPassT()` we treat a `null` argument
+    // as an out-of-memory condition. Otherwise it would be API misuse.
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+ else if (ASMJIT_UNLIKELY(pass->_cb)) {
+    // The pass is already attached to this builder; treat it as success.
+ if (pass->_cb == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ ASMJIT_PROPAGATE(_passes.append(&_allocator, pass));
+ pass->_cb = this;
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error BaseBuilder::deletePass(Pass* pass) noexcept {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(pass == nullptr))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (pass->_cb != nullptr) {
+ if (pass->_cb != this)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t index = _passes.indexOf(pass);
+ ASMJIT_ASSERT(index != Globals::kNotFound);
+
+ pass->_cb = nullptr;
+ _passes.removeAt(index);
+ }
+
+ pass->~Pass();
+ return kErrorOk;
+}
+
+Error BaseBuilder::runPasses() {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (_passes.empty())
+ return kErrorOk;
+
+ Logger* logger = code()->logger();
+ ErrorHandler* prev = errorHandler();
+ PostponedErrorHandler postponed;
+
+ Error err = kErrorOk;
+ setErrorHandler(&postponed);
+
+ for (Pass* pass : _passes) {
+ _passZone.reset();
+ err = pass->run(&_passZone, logger);
+ if (err) break;
+ }
+ _passZone.reset();
+ setErrorHandler(prev);
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err, !postponed._message.empty() ? postponed._message.data() : nullptr);
+
+ return kErrorOk;
+}
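+
+// A minimal usage sketch (not part of the library): a custom `Pass` that only
+// counts instruction nodes. The pass name and body are illustrative
+// assumptions; real passes typically transform the node list.
+//
+//   class CountInstsPass : public Pass {
+//   public:
+//     CountInstsPass() noexcept : Pass("CountInstsPass") {}
+//
+//     Error run(Zone* zone, Logger* logger) override {
+//       DebugUtils::unused(zone, logger);
+//       size_t count = 0;
+//       for (BaseNode* node = cb()->firstNode(); node; node = node->next())
+//         count += size_t(node->isInst());
+//       return kErrorOk;
+//     }
+//   };
+//
+//   builder.addPassT<CountInstsPass>(); // Owned and destroyed by the builder.
+//   builder.runPasses();                // Runs each pass with `_passZone`.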
+
+// ============================================================================
+// [asmjit::BaseBuilder - Emit]
+// ============================================================================
+
+Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+ uint32_t opCount = 4;
+
+ if (o3.isNone()) {
+ opCount = 3;
+ if (o2.isNone()) {
+ opCount = 2;
+ if (o1.isNone()) {
+ opCount = 1;
+ if (o0.isNone())
+ opCount = 0;
+ }
+ }
+ }
+
+ uint32_t options = instOptions() | globalInstOptions();
+ if (options & BaseInst::kOptionReserved) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_VALIDATION
+ // Strict validation.
+ if (hasEmitterOption(kOptionStrictValidation)) {
+ Operand_ opArray[4];
+ opArray[0].copyFrom(o0);
+ opArray[1].copyFrom(o1);
+ opArray[2].copyFrom(o2);
+ opArray[3].copyFrom(o3);
+
+ Error err = InstAPI::validate(archId(), BaseInst(instId, options, _extraReg), opArray, opCount);
+ if (ASMJIT_UNLIKELY(err)) {
+ resetInstOptions();
+ resetExtraReg();
+ resetInlineComment();
+ return reportError(err);
+ }
+ }
+#endif
+
+ // Clear options that should never be part of `InstNode`.
+ options &= ~BaseInst::kOptionReserved;
+ }
+
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= 4);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ const char* comment = inlineComment();
+
+ resetInstOptions();
+ resetInlineComment();
+
+ if (ASMJIT_UNLIKELY(!node)) {
+ resetExtraReg();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ node = new(node) InstNode(this, instId, options, opCount, opCapacity);
+ node->setExtraReg(extraReg());
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOp(2, o2);
+ node->setOp(3, o3);
+ node->resetOps(4, opCapacity);
+
+ if (comment)
+ node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+ addNode(node);
+ resetExtraReg();
+ return kErrorOk;
+}
+
+Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
+ uint32_t opCount = Globals::kMaxOpCount;
+ if (o5.isNone()) {
+ opCount = 5;
+ if (o4.isNone())
+ return _emit(instId, o0, o1, o2, o3);
+ }
+
+ uint32_t options = instOptions() | globalInstOptions();
+ if (ASMJIT_UNLIKELY(options & BaseInst::kOptionReserved)) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_VALIDATION
+ // Strict validation.
+ if (hasEmitterOption(kOptionStrictValidation)) {
+ Operand_ opArray[Globals::kMaxOpCount];
+ opArray[0].copyFrom(o0);
+ opArray[1].copyFrom(o1);
+ opArray[2].copyFrom(o2);
+ opArray[3].copyFrom(o3);
+ opArray[4].copyFrom(o4);
+ opArray[5].copyFrom(o5);
+
+ Error err = InstAPI::validate(archId(), BaseInst(instId, options, _extraReg), opArray, opCount);
+ if (ASMJIT_UNLIKELY(err)) {
+ resetInstOptions();
+ resetExtraReg();
+ resetInlineComment();
+ return reportError(err);
+ }
+ }
+#endif
+
+ // Clear options that should never be part of `InstNode`.
+ options &= ~BaseInst::kOptionReserved;
+ }
+
+ uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
+ ASMJIT_ASSERT(opCapacity >= opCount);
+
+ InstNode* node = _allocator.allocT<InstNode>(InstNode::nodeSizeOfOpCapacity(opCapacity));
+ const char* comment = inlineComment();
+
+ resetInstOptions();
+ resetInlineComment();
+
+ if (ASMJIT_UNLIKELY(!node)) {
+ resetExtraReg();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ node = new(node) InstNode(this, instId, options, opCount, opCapacity);
+ node->setExtraReg(extraReg());
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOp(2, o2);
+ node->setOp(3, o3);
+ node->setOp(4, o4);
+
+ if (opCapacity > 5)
+ node->setOp(5, o5);
+
+ if (comment)
+ node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+ addNode(node);
+ resetExtraReg();
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Align]
+// ============================================================================
+
+Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ AlignNode* node = newAlignNode(alignMode, alignment);
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Embed]
+// ============================================================================
+
+Error BaseBuilder::embed(const void* data, uint32_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ EmbedDataNode* node = newEmbedDataNode(data, dataSize);
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedLabel(const Label& label) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ EmbedLabelNode* node = newNodeT<EmbedLabelNode>(label.id());
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ EmbedLabelDeltaNode* node = newNodeT<EmbedLabelDeltaNode>(label.id(), base.id(), dataSize);
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (!isLabelValid(label))
+ return reportError(DebugUtils::errored(kErrorInvalidLabel));
+
+ ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
+ ASMJIT_PROPAGATE(bind(label));
+
+ EmbedDataNode* node = newEmbedDataNode(nullptr, uint32_t(pool.size()));
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ pool.fill(node->data());
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Comment]
+// ============================================================================
+
+Error BaseBuilder::comment(const char* data, size_t size) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ CommentNode* node = newCommentNode(data, size);
+ if (ASMJIT_UNLIKELY(!node))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+
+ addNode(node);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Serialize]
+// ============================================================================
+
+Error BaseBuilder::serialize(BaseEmitter* dst) {
+ Error err = kErrorOk;
+ BaseNode* node_ = _firstNode;
+
+ do {
+ dst->setInlineComment(node_->inlineComment());
+
+ if (node_->isInst()) {
+ InstNode* node = node_->as<InstNode>();
+ err = dst->emitInst(node->baseInst(), node->operands(), node->opCount());
+ }
+ else if (node_->isLabel()) {
+ if (node_->isConstPool()) {
+ ConstPoolNode* node = node_->as<ConstPoolNode>();
+ err = dst->embedConstPool(node->label(), node->constPool());
+ }
+ else {
+ LabelNode* node = node_->as<LabelNode>();
+ err = dst->bind(node->label());
+ }
+ }
+ else if (node_->isAlign()) {
+ AlignNode* node = node_->as<AlignNode>();
+ err = dst->align(node->alignMode(), node->alignment());
+ }
+ else if (node_->isEmbedData()) {
+ EmbedDataNode* node = node_->as<EmbedDataNode>();
+ err = dst->embed(node->data(), node->size());
+ }
+ else if (node_->isEmbedLabel()) {
+ EmbedLabelNode* node = node_->as<EmbedLabelNode>();
+ err = dst->embedLabel(node->label());
+ }
+ else if (node_->isEmbedLabelDelta()) {
+ EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
+ err = dst->embedLabelDelta(node->label(), node->baseLabel(), node->dataSize());
+ }
+ else if (node_->isSection()) {
+ SectionNode* node = node_->as<SectionNode>();
+ err = dst->section(_code->sectionById(node->id()));
+ }
+ else if (node_->isComment()) {
+ CommentNode* node = node_->as<CommentNode>();
+ err = dst->comment(node->inlineComment());
+ }
+
+ if (err) break;
+ node_ = node_->next();
+ } while (node_);
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::BaseBuilder - Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+Error BaseBuilder::dump(String& sb, uint32_t flags) const noexcept {
+ BaseNode* node = _firstNode;
+ while (node) {
+ ASMJIT_PROPAGATE(Logging::formatNode(sb, flags, this, node));
+ sb.appendChar('\n');
+ node = node->next();
+ }
+
+ return kErrorOk;
+}
+#endif
+
+// ============================================================================
+// [asmjit::BaseBuilder - Events]
+// ============================================================================
+
+Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ SectionNode* initialSection;
+ Error err = sectionNodeOf(&initialSection, 0);
+
+ if (!err)
+ err = _passes.willGrow(&_allocator, 8);
+
+ if (ASMJIT_UNLIKELY(err)) {
+ onDetach(code);
+ return err;
+ }
+
+ _cursor = initialSection;
+ _firstNode = initialSection;
+ _lastNode = initialSection;
+ initialSection->setFlags(BaseNode::kFlagIsActive);
+
+ return kErrorOk;
+}
+
+Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
+ _passes.reset();
+ _sectionNodes.reset();
+ _labelNodes.reset();
+
+ _allocator.reset(&_codeZone);
+ _codeZone.reset();
+ _dataZone.reset();
+ _passZone.reset();
+
+ _nodeFlags = 0;
+
+ _cursor = nullptr;
+ _firstNode = nullptr;
+ _lastNode = nullptr;
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::Pass - Construction / Destruction]
+// ============================================================================
+
+Pass::Pass(const char* name) noexcept
+ : _cb(nullptr),
+ _name(name) {}
+Pass::~Pass() noexcept {}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
diff --git a/3rdparty/asmjit/src/asmjit/core/builder.h b/3rdparty/asmjit/src/asmjit/core/builder.h
new file mode 100644
index 00000000000..c6d359995fc
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/builder.h
@@ -0,0 +1,1305 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_BUILDER_H_INCLUDED
+#define ASMJIT_CORE_BUILDER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/assembler.h"
+#include "../core/codeholder.h"
+#include "../core/constpool.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_builder
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseBuilder;
+class Pass;
+
+class BaseNode;
+class InstNode;
+class SectionNode;
+class LabelNode;
+class AlignNode;
+class EmbedDataNode;
+class EmbedLabelNode;
+class ConstPoolNode;
+class CommentNode;
+class SentinelNode;
+class EmbedLabelDeltaNode;
+
+// Only used by Compiler infrastructure.
+class JumpAnnotation;
+
+// ============================================================================
+// [asmjit::BaseBuilder]
+// ============================================================================
+
+class ASMJIT_VIRTAPI BaseBuilder : public BaseEmitter {
+public:
+ ASMJIT_NONCOPYABLE(BaseBuilder)
+ typedef BaseEmitter Base;
+
+ //! Base zone used to allocate nodes and passes.
+ Zone _codeZone;
+ //! Data zone used to allocate data and names.
+ Zone _dataZone;
+ //! Pass zone, passed to `Pass::run()`.
+ Zone _passZone;
+ //! Allocator that uses `_codeZone`.
+ ZoneAllocator _allocator;
+
+ //! Array of `Pass` objects.
+ ZoneVector<Pass*> _passes;
+  //! Maps section indexes to `SectionNode` nodes.
+ ZoneVector<SectionNode*> _sectionNodes;
+ //! Maps label indexes to `LabelNode` nodes.
+ ZoneVector<LabelNode*> _labelNodes;
+
+ //! Current node (cursor).
+ BaseNode* _cursor;
+ //! First node of the current section.
+ BaseNode* _firstNode;
+ //! Last node of the current section.
+ BaseNode* _lastNode;
+
+ //! Flags assigned to each new node.
+ uint32_t _nodeFlags;
+  //! The section links are dirty (used internally).
+ bool _dirtySectionLinks;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseBuilder` instance.
+ ASMJIT_API BaseBuilder() noexcept;
+ //! Destroys the `BaseBuilder` instance.
+ ASMJIT_API virtual ~BaseBuilder() noexcept;
+
+ //! \}
+
+ //! \name Node Management
+ //! \{
+
+ //! Returns the first node.
+ inline BaseNode* firstNode() const noexcept { return _firstNode; }
+ //! Returns the last node.
+ inline BaseNode* lastNode() const noexcept { return _lastNode; }
+
+ //! Allocates and instantiates a new node of type `T` and returns its instance.
+ //! If the allocation fails `nullptr` is returned.
+ //!
+  //! The template argument `T` must be a type that extends \ref BaseNode.
+ //!
+ //! \remarks The pointer returned (if non-null) is owned by the Builder or
+ //! Compiler. When the Builder/Compiler is destroyed it destroys all nodes
+ //! it created so no manual memory management is required.
+ template<typename T>
+ inline T* newNodeT() noexcept {
+ return _allocator.newT<T>(this);
+ }
+
+ //! \overload
+ template<typename T, typename... Args>
+ inline T* newNodeT(Args&&... args) noexcept {
+ return _allocator.newT<T>(this, std::forward<Args>(args)...);
+ }
+
+ //! Creates a new `LabelNode`.
+ ASMJIT_API LabelNode* newLabelNode() noexcept;
+ //! Creates a new `AlignNode`.
+ ASMJIT_API AlignNode* newAlignNode(uint32_t alignMode, uint32_t alignment) noexcept;
+ //! Creates a new `EmbedDataNode`.
+ ASMJIT_API EmbedDataNode* newEmbedDataNode(const void* data, uint32_t size) noexcept;
+ //! Creates a new `ConstPoolNode`.
+ ASMJIT_API ConstPoolNode* newConstPoolNode() noexcept;
+ //! Creates a new `CommentNode`.
+ ASMJIT_API CommentNode* newCommentNode(const char* data, size_t size) noexcept;
+
+ ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0) noexcept;
+ ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1) noexcept;
+ ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2) noexcept;
+ ASMJIT_API InstNode* newInstNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) noexcept;
+ ASMJIT_API InstNode* newInstNodeRaw(uint32_t instId, uint32_t instOptions, uint32_t opCount) noexcept;
+
+ //! Adds `node` after the current and sets the current node to the given `node`.
+ ASMJIT_API BaseNode* addNode(BaseNode* node) noexcept;
+ //! Inserts the given `node` after `ref`.
+ ASMJIT_API BaseNode* addAfter(BaseNode* node, BaseNode* ref) noexcept;
+ //! Inserts the given `node` before `ref`.
+ ASMJIT_API BaseNode* addBefore(BaseNode* node, BaseNode* ref) noexcept;
+ //! Removes the given `node`.
+ ASMJIT_API BaseNode* removeNode(BaseNode* node) noexcept;
+ //! Removes multiple nodes.
+ ASMJIT_API void removeNodes(BaseNode* first, BaseNode* last) noexcept;
+
+ //! Returns the cursor.
+ //!
+ //! When the Builder/Compiler is created it automatically creates a '.text'
+ //! \ref SectionNode, which will be the initial one. When instructions are
+ //! added they are always added after the cursor and the cursor is changed
+ //! to be that newly added node. Use `setCursor()` to change where new nodes
+ //! are inserted.
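+  //!
+  //! A minimal usage sketch (not part of the library), assuming `cc` is an
+  //! attached Builder/Compiler instance:
+  //!
+  //! \code
+  //! BaseNode* here = cc.cursor();     // Remember the insertion point.
+  //! // ... emit more code, the cursor moves forward ...
+  //! BaseNode* prev = cc.setCursor(here);
+  //! cc.comment("inserted later");     // Lands right after `here`.
+  //! cc.setCursor(prev);               // Restore the previous cursor.
+  //! \endcode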
+ inline BaseNode* cursor() const noexcept { return _cursor; }
+
+  //! Sets the current node to `node` and returns the previous one.
+ ASMJIT_API BaseNode* setCursor(BaseNode* node) noexcept;
+
+ //! Sets the current node without returning the previous node.
+ //!
+ //! Only use this function if you are concerned about performance and want
+ //! this inlined (for example if you set the cursor in a loop, etc...).
+ inline void _setCursor(BaseNode* node) noexcept { _cursor = node; }
+
+ //! \}
+
+ //! \name Section Management
+ //! \{
+
+ //! Returns a vector of SectionNode objects.
+ //!
+  //! \note If a section with some id is not associated with the Builder/Compiler,
+  //! its entry will be null, so always check for nulls when iterating over the vector.
+ inline const ZoneVector<SectionNode*>& sectionNodes() const noexcept { return _sectionNodes; }
+
+ //! Tests whether the `SectionNode` of the given `sectionId` was registered.
+ inline bool hasRegisteredSectionNode(uint32_t sectionId) const noexcept {
+ return sectionId < _sectionNodes.size() && _sectionNodes[sectionId] != nullptr;
+ }
+
+ //! Returns or creates a `SectionNode` that matches the given `sectionId`.
+ //!
+ //! \remarks This function will either get the existing `SectionNode` or create
+ //! it in case it wasn't created before. You can check whether a section has a
+ //! registered `SectionNode` by using `BaseBuilder::hasRegisteredSectionNode()`.
+ ASMJIT_API Error sectionNodeOf(SectionNode** pOut, uint32_t sectionId) noexcept;
+
+ ASMJIT_API Error section(Section* section) override;
+
+ //! Returns whether the section links of active section nodes are dirty. You can
+  //! update these links by calling `updateSectionLinks()` in that case.
+ inline bool hasDirtySectionLinks() const noexcept { return _dirtySectionLinks; }
+
+ //! Updates links of all active section nodes.
+ ASMJIT_API void updateSectionLinks() noexcept;
+
+ //! \}
+
+ //! \name Label Management
+ //! \{
+
+ //! Returns a vector of LabelNode nodes.
+ //!
+  //! \note If a label with some id is not associated with the Builder/Compiler,
+  //! its entry will be null, so always check for nulls when iterating over the vector.
+ inline const ZoneVector<LabelNode*>& labelNodes() const noexcept { return _labelNodes; }
+
+ //! Tests whether the `LabelNode` of the given `labelId` was registered.
+ inline bool hasRegisteredLabelNode(uint32_t labelId) const noexcept {
+ return labelId < _labelNodes.size() && _labelNodes[labelId] != nullptr;
+ }
+
+ //! \overload
+ inline bool hasRegisteredLabelNode(const Label& label) const noexcept {
+ return hasRegisteredLabelNode(label.id());
+ }
+
+ //! Gets or creates a `LabelNode` that matches the given `labelId`.
+ //!
+ //! \remarks This function will either get the existing `LabelNode` or create
+ //! it in case it wasn't created before. You can check whether a label has a
+ //! registered `LabelNode` by using `BaseBuilder::hasRegisteredLabelNode()`.
+ ASMJIT_API Error labelNodeOf(LabelNode** pOut, uint32_t labelId) noexcept;
+
+ //! \overload
+ inline Error labelNodeOf(LabelNode** pOut, const Label& label) noexcept {
+ return labelNodeOf(pOut, label.id());
+ }
+
+ //! Registers this label node [Internal].
+ //!
+ //! This function is used internally to register a newly created `LabelNode`
+ //! with this instance of Builder/Compiler. Use `labelNodeOf()` functions to
+ //! get back `LabelNode` from a label or its identifier.
+ ASMJIT_API Error registerLabelNode(LabelNode* node) noexcept;
+
+ ASMJIT_API Label newLabel() override;
+ ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
+ ASMJIT_API Error bind(const Label& label) override;
+
+ //! \}
+
+ //! \name Passes
+ //! \{
+
+ //! Returns a vector of `Pass` instances that will be executed by `runPasses()`.
+ inline const ZoneVector<Pass*>& passes() const noexcept { return _passes; }
+
+ //! Allocates and instantiates a new pass of type `T` and returns its instance.
+ //! If the allocation fails `nullptr` is returned.
+ //!
+  //! The template argument `T` must be a type that extends \ref Pass.
+ //!
+ //! \remarks The pointer returned (if non-null) is owned by the Builder or
+ //! Compiler. When the Builder/Compiler is destroyed it destroys all passes
+ //! it created so no manual memory management is required.
+ template<typename T>
+ inline T* newPassT() noexcept { return _codeZone.newT<T>(); }
+
+ //! \overload
+ template<typename T, typename... Args>
+ inline T* newPassT(Args&&... args) noexcept { return _codeZone.newT<T>(std::forward<Args>(args)...); }
+
+ template<typename T>
+ inline Error addPassT() noexcept { return addPass(newPassT<T>()); }
+
+ template<typename T, typename... Args>
+ inline Error addPassT(Args&&... args) noexcept { return addPass(newPassT<T, Args...>(std::forward<Args>(args)...)); }
+
+ //! Returns `Pass` by name.
+ ASMJIT_API Pass* passByName(const char* name) const noexcept;
+ //! Adds `pass` to the list of passes.
+ ASMJIT_API Error addPass(Pass* pass) noexcept;
+  //! Removes `pass` from the list of passes and deletes it.
+ ASMJIT_API Error deletePass(Pass* pass) noexcept;
+
+ //! Runs all passes in order.
+ ASMJIT_API Error runPasses();
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) override;
+
+ //! \}
+
+ //! \name Align
+ //! \{
+
+ ASMJIT_API Error align(uint32_t alignMode, uint32_t alignment) override;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ ASMJIT_API Error embed(const void* data, uint32_t dataSize) override;
+ ASMJIT_API Error embedLabel(const Label& label) override;
+ ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) override;
+ ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ ASMJIT_API Error comment(const char* data, size_t size = SIZE_MAX) override;
+
+ //! \}
+
+ //! \name Serialization
+ //! \{
+
+  //! Serializes everything to the given emitter `dst`.
+  //!
+  //! Although not strictly required, the emitter will most likely be of
+  //! Assembler type, as there is no known use case for serializing nodes
+  //! held by one Builder/Compiler into another Builder-like emitter.
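+  //!
+  //! A minimal sketch of the intended flow (`code` and both emitters are
+  //! assumed to be initialized elsewhere):
+  //!
+  //! \code
+  //! x86::Builder builder(&code);     // Nodes were emitted here.
+  //! x86::Assembler assembler(&code); // Encodes the machine code.
+  //!
+  //! Error err = builder.serialize(&assembler);
+  //! \endcode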
+ ASMJIT_API Error serialize(BaseEmitter* dst);
+
+ //! \}
+
+ //! \name Logging
+ //! \{
+
+#ifndef ASMJIT_NO_LOGGING
+ ASMJIT_API Error dump(String& sb, uint32_t flags = 0) const noexcept;
+#endif
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseNode]
+// ============================================================================
+
+//! Base node.
+//!
+//! Every node represents a building block used by `BaseBuilder`. It can be
+//! an instruction, data, label, comment, directive, or any other high-level
+//! representation that can be transformed into the building blocks mentioned.
+//! Every class that inherits `BaseBuilder` can define its own nodes, which it
+//! can lower to basic nodes.
+class BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(BaseNode)
+
+ union {
+ struct {
+ //! Previous node.
+ BaseNode* _prev;
+ //! Next node.
+ BaseNode* _next;
+ };
+ //! Links (previous and next nodes).
+ BaseNode* _links[2];
+ };
+
+ //! Data shared between all types of nodes.
+ struct AnyData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Not used by BaseNode.
+ uint8_t _reserved0;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+ struct InstData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Instruction operands count (used).
+ uint8_t _opCount;
+ //! Instruction operands capacity (allocated).
+ uint8_t _opCapacity;
+ };
+
+ struct SentinelData {
+ //! Node type, see \ref NodeType.
+ uint8_t _nodeType;
+ //! Node flags, see \ref Flags.
+ uint8_t _nodeFlags;
+ //! Sentinel type.
+ uint8_t _sentinelType;
+ //! Not used by BaseNode.
+ uint8_t _reserved1;
+ };
+
+ union {
+ AnyData _any;
+ InstData _inst;
+ SentinelData _sentinel;
+ };
+
+ //! Node position in code (should be unique).
+ uint32_t _position;
+
+  //! Value reserved for AsmJit users, never touched by AsmJit itself.
+ union {
+ uint64_t _userDataU64;
+ void* _userDataPtr;
+ };
+
+ //! Data used exclusively by the current `Pass`.
+ void* _passData;
+
+ //! Inline comment/annotation or nullptr if not used.
+ const char* _inlineComment;
+
+ //! Type of `BaseNode`.
+ enum NodeType : uint32_t {
+ //! Invalid node (internal, don't use).
+ kNodeNone = 0,
+
+ // [BaseBuilder]
+
+ //! Node is `InstNode` or `InstExNode`.
+ kNodeInst = 1,
+ //! Node is `SectionNode`.
+ kNodeSection = 2,
+ //! Node is `LabelNode`.
+ kNodeLabel = 3,
+ //! Node is `AlignNode`.
+ kNodeAlign = 4,
+ //! Node is `EmbedDataNode`.
+ kNodeEmbedData = 5,
+ //! Node is `EmbedLabelNode`.
+ kNodeEmbedLabel = 6,
+ //! Node is `EmbedLabelDeltaNode`.
+ kNodeEmbedLabelDelta = 7,
+ //! Node is `ConstPoolNode`.
+ kNodeConstPool = 8,
+ //! Node is `CommentNode`.
+ kNodeComment = 9,
+ //! Node is `SentinelNode`.
+ kNodeSentinel = 10,
+
+ // [BaseCompiler]
+
+ //! Node is `JumpNode` (acts as InstNode).
+ kNodeJump = 15,
+ //! Node is `FuncNode` (acts as LabelNode).
+ kNodeFunc = 16,
+ //! Node is `FuncRetNode` (acts as InstNode).
+ kNodeFuncRet = 17,
+ //! Node is `FuncCallNode` (acts as InstNode).
+ kNodeFuncCall = 18,
+
+ // [UserDefined]
+
+ //! First id of a user-defined node.
+ kNodeUser = 32
+ };
+
+ //! Node flags, specify what the node is and/or does.
+ enum Flags : uint32_t {
+ kFlagIsCode = 0x01u, //!< Node is code that can be executed (instruction, label, align, etc...).
+ kFlagIsData = 0x02u, //!< Node is data that cannot be executed (data, const-pool, etc...).
+ kFlagIsInformative = 0x04u, //!< Node is informative, can be removed and ignored.
+ kFlagIsRemovable = 0x08u, //!< Node can be safely removed if unreachable.
+ kFlagHasNoEffect = 0x10u, //!< Node does nothing when executed (label, align, explicit nop).
+ kFlagActsAsInst = 0x20u, //!< Node is an instruction or acts as it.
+ kFlagActsAsLabel = 0x40u, //!< Node is a label or acts as it.
+ kFlagIsActive = 0x80u //!< Node is active (part of the code).
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseNode` - always use `BaseBuilder` to allocate nodes.
+ ASMJIT_INLINE BaseNode(BaseBuilder* cb, uint32_t type, uint32_t flags = 0) noexcept {
+ _prev = nullptr;
+ _next = nullptr;
+ _any._nodeType = uint8_t(type);
+ _any._nodeFlags = uint8_t(flags | cb->_nodeFlags);
+ _any._reserved0 = 0;
+ _any._reserved1 = 0;
+ _position = 0;
+ _userDataU64 = 0;
+ _passData = nullptr;
+ _inlineComment = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Casts this node to `T*`.
+ template<typename T>
+ inline T* as() noexcept { return static_cast<T*>(this); }
+ //! Casts this node to `const T*`.
+ template<typename T>
+ inline const T* as() const noexcept { return static_cast<const T*>(this); }
+
+ //! Returns previous node or `nullptr` if this node is either first or not
+ //! part of Builder/Compiler node-list.
+ inline BaseNode* prev() const noexcept { return _prev; }
+ //! Returns next node or `nullptr` if this node is either last or not part
+ //! of Builder/Compiler node-list.
+ inline BaseNode* next() const noexcept { return _next; }
+
+ //! Returns the type of the node, see `NodeType`.
+ inline uint32_t type() const noexcept { return _any._nodeType; }
+
+ //! Sets the type of the node, see `NodeType` (internal).
+ //!
+  //! \remarks You should never set the type of a node to anything other than
+  //! its initial value. This function is only provided for users that use
+  //! custom nodes and need to change the type either during construction or later.
+ inline void setType(uint32_t type) noexcept { _any._nodeType = uint8_t(type); }
+
+ //! Tests whether this node is either `InstNode` or extends it.
+ inline bool isInst() const noexcept { return hasFlag(kFlagActsAsInst); }
+ //! Tests whether this node is `SectionNode`.
+ inline bool isSection() const noexcept { return type() == kNodeSection; }
+ //! Tests whether this node is either `LabelNode` or extends it.
+ inline bool isLabel() const noexcept { return hasFlag(kFlagActsAsLabel); }
+ //! Tests whether this node is `AlignNode`.
+ inline bool isAlign() const noexcept { return type() == kNodeAlign; }
+ //! Tests whether this node is `EmbedDataNode`.
+ inline bool isEmbedData() const noexcept { return type() == kNodeEmbedData; }
+ //! Tests whether this node is `EmbedLabelNode`.
+ inline bool isEmbedLabel() const noexcept { return type() == kNodeEmbedLabel; }
+ //! Tests whether this node is `EmbedLabelDeltaNode`.
+ inline bool isEmbedLabelDelta() const noexcept { return type() == kNodeEmbedLabelDelta; }
+ //! Tests whether this node is `ConstPoolNode`.
+ inline bool isConstPool() const noexcept { return type() == kNodeConstPool; }
+ //! Tests whether this node is `CommentNode`.
+ inline bool isComment() const noexcept { return type() == kNodeComment; }
+ //! Tests whether this node is `SentinelNode`.
+ inline bool isSentinel() const noexcept { return type() == kNodeSentinel; }
+
+ //! Tests whether this node is `FuncNode`.
+ inline bool isFunc() const noexcept { return type() == kNodeFunc; }
+ //! Tests whether this node is `FuncRetNode`.
+ inline bool isFuncRet() const noexcept { return type() == kNodeFuncRet; }
+ //! Tests whether this node is `FuncCallNode`.
+ inline bool isFuncCall() const noexcept { return type() == kNodeFuncCall; }
+
+ //! Returns the node flags, see \ref Flags.
+ inline uint32_t flags() const noexcept { return _any._nodeFlags; }
+ //! Tests whether the node has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_any._nodeFlags) & flag) != 0; }
+ //! Replaces node flags with `flags`.
+ inline void setFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(flags); }
+ //! Adds the given `flags` to node flags.
+ inline void addFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags | flags); }
+ //! Clears the given `flags` from node flags.
+ inline void clearFlags(uint32_t flags) noexcept { _any._nodeFlags = uint8_t(_any._nodeFlags & (flags ^ 0xFF)); }
+
+ //! Tests whether the node is code that can be executed.
+ inline bool isCode() const noexcept { return hasFlag(kFlagIsCode); }
+ //! Tests whether the node is data that cannot be executed.
+ inline bool isData() const noexcept { return hasFlag(kFlagIsData); }
+  //! Tests whether the node is informative only (never encoded, e.g. a comment).
+ inline bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); }
+ //! Tests whether the node is removable if it's in an unreachable code block.
+ inline bool isRemovable() const noexcept { return hasFlag(kFlagIsRemovable); }
+ //! Tests whether the node has no effect when executed (label, .align, nop, ...).
+ inline bool hasNoEffect() const noexcept { return hasFlag(kFlagHasNoEffect); }
+ //! Tests whether the node is part of the code.
+ inline bool isActive() const noexcept { return hasFlag(kFlagIsActive); }
+
+ //! Tests whether the node has a position assigned.
+ //!
+ //! \remarks Returns `true` if node position is non-zero.
+ inline bool hasPosition() const noexcept { return _position != 0; }
+ //! Returns node position.
+ inline uint32_t position() const noexcept { return _position; }
+ //! Sets node position.
+ //!
+ //! Node position is a 32-bit unsigned integer that is used by Compiler to
+ //! track where the node is relatively to the start of the function. It doesn't
+ //! describe a byte position in a binary, instead it's just a pseudo position
+ //! used by liveness analysis and other tools around Compiler.
+ //!
+ //! If you don't use Compiler then you may use `position()` and `setPosition()`
+ //! freely for your own purposes if the 32-bit value limit is okay for you.
+ inline void setPosition(uint32_t position) noexcept { _position = position; }
+
+  //! Returns user data cast to `T*`.
+  //!
+  //! User data is dedicated to AsmJit users and is never touched by the
+  //! library itself. You can store either a pointer through `setUserDataAsPtr()`
+  //! or a 64-bit integer through `setUserDataAsInt64()` / `setUserDataAsUInt64()`.
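+  //!
+  //! A minimal sketch (the `MyData` type is illustrative):
+  //!
+  //! \code
+  //! struct MyData { int value; };
+  //! MyData data { 42 };
+  //!
+  //! node->setUserDataAsPtr(&data);
+  //! MyData* back = node->userDataAsPtr<MyData>();
+  //! \endcode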
+ template<typename T>
+ inline T* userDataAsPtr() const noexcept { return static_cast<T*>(_userDataPtr); }
+  //! Returns user data cast to `int64_t`.
+  inline int64_t userDataAsInt64() const noexcept { return int64_t(_userDataU64); }
+  //! Returns user data cast to `uint64_t`.
+ inline uint64_t userDataAsUInt64() const noexcept { return _userDataU64; }
+
+ //! Sets user data to `data`.
+ template<typename T>
+ inline void setUserDataAsPtr(T* data) noexcept { _userDataPtr = static_cast<void*>(data); }
+  //! Sets user data to the given 64-bit signed `value`.
+  inline void setUserDataAsInt64(int64_t value) noexcept { _userDataU64 = uint64_t(value); }
+  //! Sets user data to the given 64-bit unsigned `value`.
+ inline void setUserDataAsUInt64(uint64_t value) noexcept { _userDataU64 = value; }
+
+ //! Resets user data to zero / nullptr.
+ inline void resetUserData() noexcept { _userDataU64 = 0; }
+
+ //! Tests whether the node has an associated pass data.
+ inline bool hasPassData() const noexcept { return _passData != nullptr; }
+ //! Returns the node pass data - data used during processing & transformations.
+ template<typename T>
+ inline T* passData() const noexcept { return (T*)_passData; }
+ //! Sets the node pass data to `data`.
+ template<typename T>
+ inline void setPassData(T* data) noexcept { _passData = (void*)data; }
+ //! Resets the node pass data to nullptr.
+ inline void resetPassData() noexcept { _passData = nullptr; }
+
+ //! Tests whether the node has an inline comment/annotation.
+ inline bool hasInlineComment() const noexcept { return _inlineComment != nullptr; }
+ //! Returns an inline comment/annotation string.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets an inline comment/annotation string to `s`.
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets an inline comment/annotation string to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstNode]
+// ============================================================================
+
+//! Instruction node.
+//!
+//! Wraps an instruction with its options and operands.
+class InstNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(InstNode)
+
+ enum : uint32_t {
+    //! Count of embedded operands per `InstNode` that are always allocated as
+    //! a part of the instruction. The minimum number of embedded operands is 4,
+    //! but in 32-bit mode pointers are smaller and we can embed 5. Instructions
+    //! that need more operands (up to 6) are always stored in `InstExNode`.
+ kBaseOpCapacity = uint32_t((128 - sizeof(BaseNode) - sizeof(BaseInst)) / sizeof(Operand_))
+ };
+
+ //! Base instruction data.
+ BaseInst _baseInst;
+ //! First 4 or 5 operands (indexed from 0).
+ Operand_ _opArray[kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstNode` instance.
+ ASMJIT_INLINE InstNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCount, uint32_t opCapacity = kBaseOpCapacity) noexcept
+ : BaseNode(cb, kNodeInst, kFlagIsCode | kFlagIsRemovable | kFlagActsAsInst),
+ _baseInst(instId, options) {
+ _inst._opCapacity = uint8_t(opCapacity);
+ _inst._opCount = uint8_t(opCount);
+ }
+
+  //! Resets all built-in operands, including `extraReg`.
+ inline void _resetOps() noexcept {
+ _baseInst.resetExtraReg();
+ for (uint32_t i = 0, count = opCapacity(); i < count; i++)
+ _opArray[i].reset();
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline BaseInst& baseInst() noexcept { return _baseInst; }
+ inline const BaseInst& baseInst() const noexcept { return _baseInst; }
+
+ //! Returns the instruction id, see `BaseInst::Id`.
+ inline uint32_t id() const noexcept { return _baseInst.id(); }
+ //! Sets the instruction id to `id`, see `BaseInst::Id`.
+ inline void setId(uint32_t id) noexcept { _baseInst.setId(id); }
+
+ //! Returns instruction options.
+ inline uint32_t instOptions() const noexcept { return _baseInst.options(); }
+ //! Sets instruction options.
+ inline void setInstOptions(uint32_t options) noexcept { _baseInst.setOptions(options); }
+ //! Adds instruction options.
+ inline void addInstOptions(uint32_t options) noexcept { _baseInst.addOptions(options); }
+ //! Clears instruction options.
+ inline void clearInstOptions(uint32_t options) noexcept { _baseInst.clearOptions(options); }
+
+ //! Tests whether the node has an extra register operand.
+ inline bool hasExtraReg() const noexcept { return _baseInst.hasExtraReg(); }
+ //! Returns extra register operand.
+ inline RegOnly& extraReg() noexcept { return _baseInst.extraReg(); }
+ //! \overload
+ inline const RegOnly& extraReg() const noexcept { return _baseInst.extraReg(); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const BaseReg& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Sets extra register operand to `reg`.
+ inline void setExtraReg(const RegOnly& reg) noexcept { _baseInst.setExtraReg(reg); }
+ //! Resets extra register operand.
+ inline void resetExtraReg() noexcept { _baseInst.resetExtraReg(); }
+
+ //! Returns operands count.
+ inline uint32_t opCount() const noexcept { return _inst._opCount; }
+ //! Returns operands capacity.
+ inline uint32_t opCapacity() const noexcept { return _inst._opCapacity; }
+
+ //! Sets operands count.
+ inline void setOpCount(uint32_t opCount) noexcept { _inst._opCount = uint8_t(opCount); }
+
+ //! Returns operands array.
+ inline Operand* operands() noexcept { return (Operand*)_opArray; }
+ //! Returns operands array (const).
+ inline const Operand* operands() const noexcept { return (const Operand*)_opArray; }
+
+ inline Operand& opType(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ return _opArray[index].as<Operand>();
+ }
+
+ inline const Operand& opType(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ return _opArray[index].as<Operand>();
+ }
+
+ inline void setOp(uint32_t index, const Operand_& op) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].copyFrom(op);
+ }
+
+ inline void resetOp(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < opCapacity());
+ _opArray[index].reset();
+ }
+
+ inline void resetOps(uint32_t start, uint32_t end) noexcept {
+ for (uint32_t i = start; i < end; i++)
+ _opArray[i].reset();
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline bool hasOpType(uint32_t opType) const noexcept {
+ for (uint32_t i = 0, count = opCount(); i < count; i++)
+ if (_opArray[i].opType() == opType)
+ return true;
+ return false;
+ }
+
+ inline bool hasRegOp() const noexcept { return hasOpType(Operand::kOpReg); }
+ inline bool hasMemOp() const noexcept { return hasOpType(Operand::kOpMem); }
+ inline bool hasImmOp() const noexcept { return hasOpType(Operand::kOpImm); }
+ inline bool hasLabelOp() const noexcept { return hasOpType(Operand::kOpLabel); }
+
+ inline uint32_t indexOfOpType(uint32_t opType) const noexcept {
+ uint32_t i = 0;
+ uint32_t count = opCount();
+
+ while (i < count) {
+ if (_opArray[i].opType() == opType)
+ break;
+ i++;
+ }
+
+ return i;
+ }
+
+ inline uint32_t indexOfMemOp() const noexcept { return indexOfOpType(Operand::kOpMem); }
+ inline uint32_t indexOfImmOp() const noexcept { return indexOfOpType(Operand::kOpImm); }
+ inline uint32_t indexOfLabelOp() const noexcept { return indexOfOpType(Operand::kOpLabel); }
+
+ //! \}
+
+ //! \name Rewriting
+ //! \{
+
+ inline uint32_t* _getRewriteArray() noexcept { return &_baseInst._extraReg._id; }
+ inline const uint32_t* _getRewriteArray() const noexcept { return &_baseInst._extraReg._id; }
+
+ ASMJIT_INLINE uint32_t getRewriteIndex(const uint32_t* id) const noexcept {
+ const uint32_t* array = _getRewriteArray();
+ ASMJIT_ASSERT(array <= id);
+
+ size_t index = (size_t)(id - array);
+ ASMJIT_ASSERT(index < 32);
+
+ return uint32_t(index);
+ }
+
+ ASMJIT_INLINE void rewriteIdAtIndex(uint32_t index, uint32_t id) noexcept {
+ uint32_t* array = _getRewriteArray();
+ array[index] = id;
+ }
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ static inline uint32_t capacityOfOpCount(uint32_t opCount) noexcept {
+ return opCount <= kBaseOpCapacity ? kBaseOpCapacity : Globals::kMaxOpCount;
+ }
+
+ static inline size_t nodeSizeOfOpCapacity(uint32_t opCapacity) noexcept {
+ size_t base = sizeof(InstNode) - kBaseOpCapacity * sizeof(Operand);
+ return base + opCapacity * sizeof(Operand);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstExNode]
+// ============================================================================
+
+//! Instruction node with the maximum number of operands.
+//!
+//! This node is created automatically by Builder/Compiler when the required
+//! number of operands exceeds the default capacity of `InstNode`.
+class InstExNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(InstExNode)
+
+  //! Continuation of `_opArray[]`, extending the total capacity to `kMaxOpCount` operands.
+ Operand_ _opArrayEx[Globals::kMaxOpCount - kBaseOpCapacity];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `InstExNode` instance.
+ inline InstExNode(BaseBuilder* cb, uint32_t instId, uint32_t options, uint32_t opCapacity = Globals::kMaxOpCount) noexcept
+ : InstNode(cb, instId, options, opCapacity) {}
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SectionNode]
+// ============================================================================
+
+//! Section node.
+class SectionNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SectionNode)
+
+ //! Section id.
+ uint32_t _id;
+
+ //! Next section node that follows this section.
+ //!
+ //! This link is only valid when the section is active (is part of the code)
+ //! and when `Builder::hasDirtySectionLinks()` returns `false`. If you intend
+ //! to use this field you should always call `Builder::updateSectionLinks()`
+ //! before you do so.
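+  //!
+  //! A minimal sketch of the required protocol (illustrative):
+  //!
+  //! \code
+  //! if (builder.hasDirtySectionLinks())
+  //!   builder.updateSectionLinks();
+  //! SectionNode* next = sectionNode->_nextSection;  // Now safe to read.
+  //! \endcode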
+ SectionNode* _nextSection;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SectionNode` instance.
+ inline SectionNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : BaseNode(cb, kNodeSection, kFlagHasNoEffect),
+ _id(id),
+ _nextSection(nullptr) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the section id.
+ inline uint32_t id() const noexcept { return _id; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelNode]
+// ============================================================================
+
+//! Label node.
+class LabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(LabelNode)
+
+ uint32_t _id;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `LabelNode` instance.
+ inline LabelNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : BaseNode(cb, kNodeLabel, kFlagHasNoEffect | kFlagActsAsLabel),
+ _id(id) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the id of the label.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Returns the label as `Label` operand.
+ inline Label label() const noexcept { return Label(_id); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::AlignNode]
+// ============================================================================
+
+//! Align directive (BaseBuilder).
+//!
+//! Wraps `.align` directive.
+class AlignNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(AlignNode)
+
+ //! Align mode, see `AlignMode`.
+ uint32_t _alignMode;
+ //! Alignment (in bytes).
+ uint32_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `AlignNode` instance.
+ inline AlignNode(BaseBuilder* cb, uint32_t alignMode, uint32_t alignment) noexcept
+ : BaseNode(cb, kNodeAlign, kFlagIsCode | kFlagHasNoEffect),
+ _alignMode(alignMode),
+ _alignment(alignment) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns align mode.
+ inline uint32_t alignMode() const noexcept { return _alignMode; }
+ //! Sets align mode to `alignMode`.
+ inline void setAlignMode(uint32_t alignMode) noexcept { _alignMode = alignMode; }
+
+  //! Returns alignment in bytes.
+  inline uint32_t alignment() const noexcept { return _alignment; }
+  //! Sets alignment in bytes to `alignment`.
+  inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedDataNode]
+// ============================================================================
+
+//! Embed data node.
+//!
+//! Wraps the `.data` directive. The node contains data that will be placed at
+//! the node's position in the assembler stream. The data is considered RAW;
+//! no analysis or byte-order conversion is performed on it.
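+//!
+//! Typically created through `BaseBuilder::embed()` (a minimal sketch;
+//! `builder` is assumed to exist):
+//!
+//! \code
+//! const uint8_t data[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
+//! builder.embed(data, uint32_t(sizeof(data)));  // Appends an EmbedDataNode.
+//! \endcode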
+class EmbedDataNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedDataNode)
+
+ enum : uint32_t {
+ kInlineBufferSize = uint32_t(64 - sizeof(BaseNode) - 4)
+ };
+
+ union {
+ struct {
+ //! Embedded data buffer.
+ uint8_t _buf[kInlineBufferSize];
+ //! Size of the data.
+ uint32_t _size;
+ };
+ struct {
+ //! Pointer to external data.
+ uint8_t* _externalPtr;
+ };
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedDataNode` instance.
+ inline EmbedDataNode(BaseBuilder* cb, void* data, uint32_t size) noexcept
+ : BaseNode(cb, kNodeEmbedData, kFlagIsData) {
+
+ if (size <= kInlineBufferSize) {
+ if (data)
+ memcpy(_buf, data, size);
+ }
+ else {
+ _externalPtr = static_cast<uint8_t*>(data);
+ }
+ _size = size;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns pointer to the data.
+ inline uint8_t* data() const noexcept { return _size <= kInlineBufferSize ? const_cast<uint8_t*>(_buf) : _externalPtr; }
+ //! Returns size of the data.
+ inline uint32_t size() const noexcept { return _size; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedLabelNode]
+// ============================================================================
+
+//! Label data node.
+class EmbedLabelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelNode)
+
+ uint32_t _id;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelNode` instance.
+ inline EmbedLabelNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabel, kFlagIsData),
+ _id(id) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the id of the label.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Sets the label id (use with caution, improper use can break a lot of things).
+ inline void setId(uint32_t id) noexcept { _id = id; }
+
+ //! Returns the label as `Label` operand.
+ inline Label label() const noexcept { return Label(_id); }
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setId(label.id()); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::EmbedLabelDeltaNode]
+// ============================================================================
+
+//! Label delta data node (embeds the difference between two labels).
+class EmbedLabelDeltaNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(EmbedLabelDeltaNode)
+
+ uint32_t _id;
+ uint32_t _baseId;
+ uint32_t _dataSize;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `EmbedLabelDeltaNode` instance.
+ inline EmbedLabelDeltaNode(BaseBuilder* cb, uint32_t id = 0, uint32_t baseId = 0, uint32_t dataSize = 0) noexcept
+ : BaseNode(cb, kNodeEmbedLabelDelta, kFlagIsData),
+ _id(id),
+ _baseId(baseId),
+ _dataSize(dataSize) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the id of the label.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Sets the label id.
+ inline void setId(uint32_t id) noexcept { _id = id; }
+ //! Returns the label as `Label` operand.
+ inline Label label() const noexcept { return Label(_id); }
+ //! Sets the label id from `label` operand.
+ inline void setLabel(const Label& label) noexcept { setId(label.id()); }
+
+ //! Returns the id of the base label.
+ inline uint32_t baseId() const noexcept { return _baseId; }
+ //! Sets the base label id.
+ inline void setBaseId(uint32_t baseId) noexcept { _baseId = baseId; }
+ //! Returns the base label as `Label` operand.
+ inline Label baseLabel() const noexcept { return Label(_baseId); }
+ //! Sets the base label id from `label` operand.
+ inline void setBaseLabel(const Label& baseLabel) noexcept { setBaseId(baseLabel.id()); }
+
+ inline uint32_t dataSize() const noexcept { return _dataSize; }
+ inline void setDataSize(uint32_t dataSize) noexcept { _dataSize = dataSize; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ConstPoolNode]
+// ============================================================================
+
+//! A node that wraps `ConstPool`.
+class ConstPoolNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(ConstPoolNode)
+
+ ConstPool _constPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `ConstPoolNode` instance.
+ inline ConstPoolNode(BaseBuilder* cb, uint32_t id = 0) noexcept
+ : LabelNode(cb, id),
+ _constPool(&cb->_codeZone) {
+
+ setType(kNodeConstPool);
+ addFlags(kFlagIsData);
+ clearFlags(kFlagIsCode | kFlagHasNoEffect);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _constPool.empty(); }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _constPool.size(); }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _constPool.alignment(); }
+
+ //! Returns the wrapped `ConstPool` instance.
+ inline ConstPool& constPool() noexcept { return _constPool; }
+ //! Returns the wrapped `ConstPool` instance (const).
+ inline const ConstPool& constPool() const noexcept { return _constPool; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! See `ConstPool::add()`.
+ inline Error add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ return _constPool.add(data, size, dstOffset);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::CommentNode]
+// ============================================================================
+
+//! Comment node.
+class CommentNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(CommentNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `CommentNode` instance.
+ inline CommentNode(BaseBuilder* cb, const char* comment) noexcept
+ : BaseNode(cb, kNodeComment, kFlagIsInformative | kFlagHasNoEffect | kFlagIsRemovable) {
+ _inlineComment = comment;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::SentinelNode]
+// ============================================================================
+
+//! Sentinel node.
+//!
+//! Sentinel is a marker that is completely ignored by the code builder. It's
+//! used to remember a position in the code, as it never gets removed by any pass.
+class SentinelNode : public BaseNode {
+public:
+ ASMJIT_NONCOPYABLE(SentinelNode)
+
+  //! Type of the sentinel (purely informative).
+ enum SentinelType : uint32_t {
+ kSentinelUnknown = 0u,
+ kSentinelFuncEnd = 1u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `SentinelNode` instance.
+ inline SentinelNode(BaseBuilder* cb, uint32_t sentinelType = kSentinelUnknown) noexcept
+ : BaseNode(cb, kNodeSentinel, kFlagIsInformative | kFlagHasNoEffect) {
+
+ _sentinel._sentinelType = uint8_t(sentinelType);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t sentinelType() const noexcept { return _sentinel._sentinelType; }
+ inline void setSentinelType(uint32_t type) noexcept { _sentinel._sentinelType = uint8_t(type); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Pass]
+// ============================================================================
+
+//! Pass can be used to implement code transformations, analysis, and lowering.
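+//!
+//! A minimal sketch of a custom pass (the class name and its body are
+//! illustrative):
+//!
+//! \code
+//! class MyPass : public Pass {
+//! public:
+//!   MyPass() noexcept : Pass("MyPass") {}
+//!
+//!   Error run(Zone* zone, Logger* logger) noexcept override {
+//!     // Inspect or transform the Builder's node list here; `zone` is
+//!     // temporary memory that is reset after `run()` returns.
+//!     return kErrorOk;
+//!   }
+//! };
+//!
+//! // builder.addPassT<MyPass>() instantiates the pass and transfers its
+//! // ownership to the Builder.
+//! \endcode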
+class ASMJIT_VIRTAPI Pass {
+public:
+ ASMJIT_BASE_CLASS(Pass)
+ ASMJIT_NONCOPYABLE(Pass)
+
+ //! BaseBuilder this pass is assigned to.
+ BaseBuilder* _cb;
+ //! Name of the pass.
+ const char* _name;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API Pass(const char* name) noexcept;
+ ASMJIT_API virtual ~Pass() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline const BaseBuilder* cb() const noexcept { return _cb; }
+ inline const char* name() const noexcept { return _name; }
+
+ //! \}
+
+ //! \name Pass Interface
+ //! \{
+
+ //! Processes the code stored in Builder or Compiler.
+ //!
+  //! This is the only function that is called by `BaseBuilder` to process
+  //! the code. It passes `zone`, which is reset after `run()` finishes.
+ virtual Error run(Zone* zone, Logger* logger) noexcept = 0;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
+#endif // ASMJIT_CORE_BUILDER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/callconv.cpp b/3rdparty/asmjit/src/asmjit/core/callconv.cpp
new file mode 100644
index 00000000000..5d915d0e38d
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/callconv.cpp
@@ -0,0 +1,59 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+#include "../core/func.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86callconv_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armcallconv_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CallConv - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId) noexcept {
+ reset();
+
+#ifdef ASMJIT_BUILD_X86
+ if (CallConv::isX86Family(ccId))
+ return x86::CallConvInternal::init(*this, ccId);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (CallConv::isArmFamily(ccId))
+ return arm::CallConvInternal::init(*this, ccId);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/callconv.h b/3rdparty/asmjit/src/asmjit/core/callconv.h
new file mode 100644
index 00000000000..f3dc385dfec
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/callconv.h
@@ -0,0 +1,411 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CALLCONV_H_INCLUDED
+#define ASMJIT_CORE_CALLCONV_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/operand.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_func
+//! \{
+
+// ============================================================================
+// [asmjit::CallConv]
+// ============================================================================
+
+//! Function calling convention.
+//!
+//! A function calling convention is a scheme that defines how function
+//! parameters are passed and how a function returns its result. AsmJit defines
+//! a variety of architecture- and OS-specific calling conventions and also
+//! provides compile-time detection to make code generation easier.
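+//!
+//! A minimal sketch of initializing and querying a calling convention
+//! (the returned values depend on the target):
+//!
+//! \code
+//! CallConv cc;
+//! if (cc.init(CallConv::kIdHost) == kErrorOk) {
+//!   uint32_t stackAlignment = cc.naturalStackAlignment();
+//!   bool calleeCleansStack = cc.hasFlag(CallConv::kFlagCalleePopsStack);
+//! }
+//! \endcode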
+struct CallConv {
+ //! Calling convention id, see `Id`.
+ uint8_t _id;
+ //! Architecture id (see `ArchInfo::Id`).
+ uint8_t _archId;
+ //! Register assignment strategy.
+ uint8_t _strategy;
+ //! Flags.
+ uint8_t _flags;
+
+ //! Red zone size (AMD64 == 128 bytes).
+ uint8_t _redZoneSize;
+ //! Spill zone size (WIN64 == 32 bytes).
+ uint8_t _spillZoneSize;
+ //! Natural stack alignment as defined by OS/ABI.
+ uint8_t _naturalStackAlignment;
+ uint8_t _reserved[1];
+
+ //! Mask of all passed registers, per group.
+ uint32_t _passedRegs[BaseReg::kGroupVirt];
+ //! Mask of all preserved registers, per group.
+ uint32_t _preservedRegs[BaseReg::kGroupVirt];
+
+ //! Internal limits of AsmJit's CallConv.
+ enum Limits : uint32_t {
+ kMaxRegArgsPerGroup = 16
+ };
+
+ //! Passed registers' order.
+ union RegOrder {
+ //! Passed registers, ordered.
+ uint8_t id[kMaxRegArgsPerGroup];
+ uint32_t packed[(kMaxRegArgsPerGroup + 3) / 4];
+ };
+
+ //! Passed registers' order, per group.
+ RegOrder _passedOrder[BaseReg::kGroupVirt];
+
+ //! Calling convention id.
+ enum Id : uint32_t {
+ //! None or invalid (can't be used).
+ kIdNone = 0,
+
+ // ------------------------------------------------------------------------
+ // [Universal]
+ // ------------------------------------------------------------------------
+
+ // TODO: To make this possible we need to know target ARCH and ABI.
+
+ /*
+
+ // Universal calling conventions are applicable to any target and are
+ // converted to target dependent conventions at runtime. The purpose of
+ // these conventions is to make using functions less target dependent.
+
+ kIdCDecl = 1,
+ kIdStdCall = 2,
+ kIdFastCall = 3,
+
+ //! AsmJit specific calling convention designed for calling functions
+ //! inside a multimedia code that don't use many registers internally,
+ //! but are long enough to be called and not inlined. These functions are
+ //! usually used to calculate trigonometric functions, logarithms, etc...
+ kIdLightCall2 = 10,
+ kIdLightCall3 = 11,
+ kIdLightCall4 = 12,
+ */
+
+ // ------------------------------------------------------------------------
+ // [X86]
+ // ------------------------------------------------------------------------
+
+ //! X86 `__cdecl` calling convention (used by C runtime and libraries).
+ kIdX86CDecl = 16,
+ //! X86 `__stdcall` calling convention (used mostly by WinAPI).
+ kIdX86StdCall = 17,
+ //! X86 `__thiscall` calling convention (MSVC/Intel).
+ kIdX86MsThisCall = 18,
+ //! X86 `__fastcall` convention (MSVC/Intel).
+ kIdX86MsFastCall = 19,
+ //! X86 `__fastcall` convention (GCC and Clang).
+ kIdX86GccFastCall = 20,
+ //! X86 `regparm(1)` convention (GCC and Clang).
+ kIdX86GccRegParm1 = 21,
+ //! X86 `regparm(2)` convention (GCC and Clang).
+ kIdX86GccRegParm2 = 22,
+ //! X86 `regparm(3)` convention (GCC and Clang).
+ kIdX86GccRegParm3 = 23,
+
+ kIdX86LightCall2 = 29,
+ kIdX86LightCall3 = 30,
+ kIdX86LightCall4 = 31,
+
+ //! X64 calling convention - WIN64-ABI.
+ kIdX86Win64 = 32,
+ //! X64 calling convention - SystemV / AMD64-ABI.
+ kIdX86SysV64 = 33,
+
+ kIdX64LightCall2 = 45,
+ kIdX64LightCall3 = 46,
+ kIdX64LightCall4 = 47,
+
+ // ------------------------------------------------------------------------
+ // [ARM]
+ // ------------------------------------------------------------------------
+
+ //! Legacy calling convention, floating point arguments are passed via GP registers.
+ kIdArm32SoftFP = 48,
+ //! Modern calling convention, uses VFP registers to pass floating point arguments.
+ kIdArm32HardFP = 49,
+
+ // ------------------------------------------------------------------------
+ // [Internal]
+ // ------------------------------------------------------------------------
+
+ //! \cond INTERNAL
+
+ _kIdX86Start = 16,
+ _kIdX86End = 31,
+
+ _kIdX64Start = 32,
+ _kIdX64End = 47,
+
+ _kIdArmStart = 48,
+ _kIdArmEnd = 49,
+
+ //! \endcond
+
+ // ------------------------------------------------------------------------
+ // [Host]
+ // ------------------------------------------------------------------------
+
+#if defined(ASMJIT_DOCGEN)
+
+ //! Default calling convention based on the current C++ compiler's settings.
+ //!
+  //! \note This should always be the same as `kIdHostCDecl`, but some
+  //! compilers allow the default calling convention to be overridden.
+  //! Overriding is not detected at the moment.
+ kIdHost = DETECTED_AT_COMPILE_TIME,
+
+ //! Default CDECL calling convention based on the current C++ compiler's settings.
+ kIdHostCDecl = DETECTED_AT_COMPILE_TIME,
+
+ //! Default STDCALL calling convention based on the current C++ compiler's settings.
+ //!
+ //! \note If not defined by the host then it's the same as `kIdHostCDecl`.
+ kIdHostStdCall = DETECTED_AT_COMPILE_TIME,
+
+ //! Compatibility for `__fastcall` calling convention.
+ //!
+ //! \note If not defined by the host then it's the same as `kIdHostCDecl`.
+ kIdHostFastCall = DETECTED_AT_COMPILE_TIME
+
+#elif ASMJIT_ARCH_X86 == 32
+
+ kIdHost = kIdX86CDecl,
+ kIdHostCDecl = kIdX86CDecl,
+ kIdHostStdCall = kIdX86StdCall,
+
+# if defined(_MSC_VER)
+ kIdHostFastCall = kIdX86MsFastCall,
+# elif defined(__GNUC__)
+ kIdHostFastCall = kIdX86GccFastCall,
+# else
+ kIdHostFastCall = kIdHost,
+# endif
+
+ kIdHostLightCall2 = kIdX86LightCall2,
+ kIdHostLightCall3 = kIdX86LightCall3,
+ kIdHostLightCall4 = kIdX86LightCall4
+
+#elif ASMJIT_ARCH_X86 == 64
+
+# if defined(_WIN32)
+ kIdHost = kIdX86Win64,
+# else
+ kIdHost = kIdX86SysV64,
+# endif
+
+ kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostFastCall = kIdHost, // Doesn't exist, redirected to host.
+
+ kIdHostLightCall2 = kIdX64LightCall2,
+ kIdHostLightCall3 = kIdX64LightCall3,
+ kIdHostLightCall4 = kIdX64LightCall4
+
+#elif ASMJIT_ARCH_ARM == 32
+
+# if defined(__SOFTFP__)
+ kIdHost = kIdArm32SoftFP,
+# else
+ kIdHost = kIdArm32HardFP,
+# endif
+ // These don't exist on ARM.
+ kIdHostCDecl = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostStdCall = kIdHost, // Doesn't exist, redirected to host.
+ kIdHostFastCall = kIdHost // Doesn't exist, redirected to host.
+
+#else
+
+ kIdHost = kIdNone,
+ kIdHostCDecl = kIdHost,
+ kIdHostStdCall = kIdHost,
+ kIdHostFastCall = kIdHost
+
+#endif
+ };
+
+ //! Strategy used to assign registers to function arguments.
+ //!
+  //! This is AsmJit-specific. It describes how AsmJit should convert function
+  //! arguments defined by `FuncSignature` into register IDs and stack offsets.
+  //! The default strategy `kStrategyDefault` assigns registers first and then
+  //! stack slots, whereas the `kStrategyWin64` strategy performs register
+  //! shadowing as defined by the WIN64 calling convention; it applies to
+  //! 64-bit calling conventions only.
+ enum Strategy : uint32_t {
+ kStrategyDefault = 0, //!< Default register assignment strategy.
+ kStrategyWin64 = 1 //!< WIN64 specific register assignment strategy.
+ };
+
+ //! Calling convention flags.
+ enum Flags : uint32_t {
+ kFlagCalleePopsStack = 0x01, //!< Callee is responsible for cleaning up the stack.
+ kFlagPassFloatsByVec = 0x02, //!< Pass F32 and F64 arguments by VEC128 register.
+ kFlagVectorCall = 0x04, //!< This is a '__vectorcall' calling convention.
+ kFlagIndirectVecArgs = 0x08 //!< Pass vector arguments indirectly (as a pointer).
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API Error init(uint32_t ccId) noexcept;
+
+ inline void reset() noexcept {
+ memset(this, 0, sizeof(*this));
+ memset(_passedOrder, 0xFF, sizeof(_passedOrder));
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the calling convention id, see `Id`.
+ inline uint32_t id() const noexcept { return _id; }
+ //! Sets the calling convention id, see `Id`.
+ inline void setId(uint32_t id) noexcept { _id = uint8_t(id); }
+
+ //! Returns the calling function architecture id.
+ inline uint32_t archId() const noexcept { return _archId; }
+ //! Sets the calling function architecture id.
+ inline void setArchType(uint32_t archId) noexcept { _archId = uint8_t(archId); }
+
+ //! Returns the strategy used to assign registers to arguments, see `Strategy`.
+ inline uint32_t strategy() const noexcept { return _strategy; }
+ //! Sets the strategy used to assign registers to arguments, see `Strategy`.
+ inline void setStrategy(uint32_t strategy) noexcept { _strategy = uint8_t(strategy); }
+
+ //! Tests whether the calling convention has the given `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (uint32_t(_flags) & flag) != 0; }
+ //! Returns the calling convention flags, see `Flags`.
+ inline uint32_t flags() const noexcept { return _flags; }
+  //! Replaces the calling convention flags, see `Flags`.
+  inline void setFlags(uint32_t flag) noexcept { _flags = uint8_t(flag); }
+  //! Adds the calling convention flags, see `Flags`.
+  inline void addFlags(uint32_t flags) noexcept { _flags = uint8_t(_flags | flags); }
+
+ //! Tests whether this calling convention specifies 'RedZone'.
+ inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+ //! Tests whether this calling convention specifies 'SpillZone'.
+ inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+ //! Returns size of 'RedZone'.
+ inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+ //! Returns size of 'SpillZone'.
+ inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+
+ //! Sets size of 'RedZone'.
+ inline void setRedZoneSize(uint32_t size) noexcept { _redZoneSize = uint8_t(size); }
+ //! Sets size of 'SpillZone'.
+ inline void setSpillZoneSize(uint32_t size) noexcept { _spillZoneSize = uint8_t(size); }
+
+ //! Returns a natural stack alignment.
+ inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+ //! Sets a natural stack alignment.
+ //!
+  //! This function can be used to override the default stack alignment when
+  //! you know that the actual alignment is different. For example, it allows
+  //! implementing custom calling conventions that guarantee a higher stack alignment.
+ inline void setNaturalStackAlignment(uint32_t value) noexcept { _naturalStackAlignment = uint8_t(value); }
+
+ inline const uint8_t* passedOrder(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _passedOrder[group].id;
+ }
+
+ inline uint32_t passedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _passedRegs[group];
+ }
+
+ inline void _setPassedPacked(uint32_t group, uint32_t p0, uint32_t p1, uint32_t p2, uint32_t p3) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ _passedOrder[group].packed[0] = p0;
+ _passedOrder[group].packed[1] = p1;
+ _passedOrder[group].packed[2] = p2;
+ _passedOrder[group].packed[3] = p3;
+ }
+
+ inline void setPassedToNone(uint32_t group) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ _setPassedPacked(group, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu);
+ _passedRegs[group] = 0u;
+ }
+
+ inline void setPassedOrder(uint32_t group, uint32_t a0, uint32_t a1 = 0xFF, uint32_t a2 = 0xFF, uint32_t a3 = 0xFF, uint32_t a4 = 0xFF, uint32_t a5 = 0xFF, uint32_t a6 = 0xFF, uint32_t a7 = 0xFF) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ // NOTE: This should always be called with all arguments known at compile time,
+ // so even if it looks scary it should be translated into few instructions.
+ _setPassedPacked(group, Support::bytepack32_4x8(a0, a1, a2, a3),
+ Support::bytepack32_4x8(a4, a5, a6, a7),
+ 0xFFFFFFFFu,
+ 0xFFFFFFFFu);
+
+ _passedRegs[group] = (a0 != 0xFF ? 1u << a0 : 0u) |
+ (a1 != 0xFF ? 1u << a1 : 0u) |
+ (a2 != 0xFF ? 1u << a2 : 0u) |
+ (a3 != 0xFF ? 1u << a3 : 0u) |
+ (a4 != 0xFF ? 1u << a4 : 0u) |
+ (a5 != 0xFF ? 1u << a5 : 0u) |
+ (a6 != 0xFF ? 1u << a6 : 0u) |
+ (a7 != 0xFF ? 1u << a7 : 0u) ;
+ }
+
+ inline uint32_t preservedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _preservedRegs[group];
+ }
+
+ inline void setPreservedRegs(uint32_t group, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _preservedRegs[group] = regs;
+ }
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ static inline bool isX86Family(uint32_t ccId) noexcept { return ccId >= _kIdX86Start && ccId <= _kIdX64End; }
+ static inline bool isArmFamily(uint32_t ccId) noexcept { return ccId >= _kIdArmStart && ccId <= _kIdArmEnd; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CALLCONV_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/codebufferwriter_p.h b/3rdparty/asmjit/src/asmjit/core/codebufferwriter_p.h
new file mode 100644
index 00000000000..ee7521153a3
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/codebufferwriter_p.h
@@ -0,0 +1,188 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
+
+#include "../core/assembler.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CodeBufferWriter]
+// ============================================================================
+
+//! Helper that is used to write into a `CodeBuffer` held by `BaseAssembler`.
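+//!
+//! A minimal sketch of the intended usage inside an emitter (`assembler` is
+//! a `BaseAssembler*`; error handling is shortened to `ASMJIT_PROPAGATE`):
+//!
+//! \code
+//! CodeBufferWriter writer(assembler);
+//! ASMJIT_PROPAGATE(writer.ensureSpace(assembler, 16));
+//!
+//! writer.emit8(0x90);     // Write a single byte (x86 NOP).
+//! writer.done(assembler); // Publish the advanced cursor back.
+//! \endcode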
+class CodeBufferWriter {
+public:
+ uint8_t* _cursor;
+
+ ASMJIT_INLINE explicit CodeBufferWriter(BaseAssembler* a) noexcept
+ : _cursor(a->_bufferPtr) {}
+
+ ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
+ size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
+ if (ASMJIT_UNLIKELY(remainingSpace < n)) {
+ CodeBuffer& buffer = a->_section->_buffer;
+ Error err = a->_code->growBuffer(&buffer, n);
+ if (ASMJIT_UNLIKELY(err))
+ return a->reportError(err);
+ _cursor = a->_bufferPtr;
+ }
+ return kErrorOk;
+ }
+
+ ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
+ ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
+ ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
+
+ ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
+ ASMJIT_ASSERT(_cursor >= from);
+ return (size_t)(_cursor - from);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit8(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor++;
+ }
+
+ template<typename T, typename Y>
+ ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size_t(cond) <= 1u);
+
+ _cursor[0] = uint8_t(U(val) & U(0xFF));
+ _cursor += size_t(cond);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit16uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
+ _cursor += 2;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uLE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emit32uBE(T val) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
+ _cursor += 4;
+ }
+
+ ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memcpy(_cursor, data, size);
+ _cursor += size;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size <= sizeof(T));
+
+ U v = U(value);
+ for (uint32_t i = 0; i < size; i++) {
+ _cursor[i] = uint8_t(v & 0xFFu);
+ v >>= 8;
+ }
+ _cursor += size;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ ASMJIT_ASSERT(size <= sizeof(T));
+
+    U v = U(value);
+    for (uint32_t i = 0; i < size; i++) {
+      // Emit the most significant of the remaining bytes first (big-endian).
+      // The original shift `v >> (sizeof(T) - 8)` underflowed for small types.
+      _cursor[i] = uint8_t((v >> ((size - 1 - i) * 8u)) & 0xFFu);
+    }
+    _cursor += size;
+ }
+
+ ASMJIT_INLINE void emitZeros(size_t size) noexcept {
+ ASMJIT_ASSERT(size != 0);
+ memset(_cursor, 0, size);
+ _cursor += size;
+ }
+
+ ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
+ ASMJIT_ASSERT(where < _cursor);
+
+ uint8_t* p = where;
+ while (++p != _cursor)
+ p[-1] = p[0];
+ _cursor--;
+ }
+
+ template<typename T>
+ ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
+ uint8_t* p = _cursor;
+
+ while (p != where) {
+ p[0] = p[-1];
+ p--;
+ }
+
+ *p = uint8_t(val & 0xFF);
+ _cursor++;
+ }
+
+ ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
+ CodeBuffer& buffer = a->_section->_buffer;
+ size_t newSize = (size_t)(_cursor - a->_bufferData);
+ ASMJIT_ASSERT(newSize <= buffer.capacity());
+
+ a->_bufferPtr = _cursor;
+ buffer._size = Support::max(buffer._size, newSize);
+ }
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/codeholder.cpp b/3rdparty/asmjit/src/asmjit/core/codeholder.cpp
new file mode 100644
index 00000000000..93c9a99df88
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/codeholder.cpp
@@ -0,0 +1,1125 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/assembler.h"
+#include "../core/logging.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Globals]
+// ============================================================================
+
+static const char CodeHolder_addrTabName[] = ".addrtab";
+
+//! Encode MOD byte.
+static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
+ return (m << 6) | (o << 3) | rm;
+}
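+//
+// For example, `x86EncodeMod(0, 2, 5) == 0x15`, the ModRM byte that
+// `relocateToBase()` below uses to patch `call rel32` into
+// `call [rip+disp32]` (FF /2).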
+
+// ============================================================================
+// [asmjit::LabelLinkIterator]
+// ============================================================================
+
+class LabelLinkIterator {
+public:
+ ASMJIT_INLINE LabelLinkIterator(LabelEntry* le) noexcept { reset(le); }
+
+ ASMJIT_INLINE explicit operator bool() const noexcept { return isValid(); }
+ ASMJIT_INLINE bool isValid() const noexcept { return _link != nullptr; }
+
+ ASMJIT_INLINE LabelLink* link() const noexcept { return _link; }
+ ASMJIT_INLINE LabelLink* operator->() const noexcept { return _link; }
+
+ ASMJIT_INLINE void reset(LabelEntry* le) noexcept {
+ _pPrev = &le->_links;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void next() noexcept {
+ _pPrev = &_link->next;
+ _link = *_pPrev;
+ }
+
+ ASMJIT_INLINE void resolveAndNext(CodeHolder* code) noexcept {
+ LabelLink* linkToDelete = _link;
+
+ _link = _link->next;
+ *_pPrev = _link;
+
+ code->_unresolvedLinkCount--;
+ code->_allocator.release(linkToDelete, sizeof(LabelLink));
+ }
+
+ LabelLink** _pPrev;
+ LabelLink* _link;
+};
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+ErrorHandler::ErrorHandler() noexcept {}
+ErrorHandler::~ErrorHandler() noexcept {}
+
+// ============================================================================
+// [asmjit::CodeHolder - Utilities]
+// ============================================================================
+
+static void CodeHolder_resetInternal(CodeHolder* self, uint32_t resetPolicy) noexcept {
+ uint32_t i;
+ const ZoneVector<BaseEmitter*>& emitters = self->emitters();
+
+ i = emitters.size();
+ while (i)
+ self->detach(emitters[--i]);
+
+ // Reset everything into its construction state.
+ self->_codeInfo.reset();
+ self->_emitterOptions = 0;
+ self->_logger = nullptr;
+ self->_errorHandler = nullptr;
+
+ // Reset all sections.
+ uint32_t numSections = self->_sections.size();
+ for (i = 0; i < numSections; i++) {
+ Section* section = self->_sections[i];
+ if (section->_buffer.data() && !section->_buffer.isExternal())
+ ::free(section->_buffer._data);
+ section->_buffer._data = nullptr;
+ section->_buffer._capacity = 0;
+ }
+
+ // Reset zone allocator and all containers using it.
+ ZoneAllocator* allocator = self->allocator();
+
+ self->_emitters.reset();
+ self->_namedLabels.reset();
+ self->_relocations.reset();
+ self->_labelEntries.reset();
+ self->_sections.reset();
+
+ self->_unresolvedLinkCount = 0;
+ self->_addressTableSection = nullptr;
+ self->_addressTableEntries.reset();
+
+ allocator->reset(&self->_zone);
+ self->_zone.reset(resetPolicy);
+}
+
+static void CodeHolder_modifyEmitterOptions(CodeHolder* self, uint32_t clear, uint32_t add) noexcept {
+ uint32_t oldOpt = self->_emitterOptions;
+ uint32_t newOpt = (oldOpt & ~clear) | add;
+
+ if (oldOpt == newOpt)
+ return;
+
+ // Modify emitter options of `CodeHolder` itself.
+ self->_emitterOptions = newOpt;
+
+ // Modify emitter options of all attached emitters.
+ for (BaseEmitter* emitter : self->emitters()) {
+ emitter->_emitterOptions = (emitter->_emitterOptions & ~clear) | add;
+ emitter->onUpdateGlobalInstOptions();
+ }
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Construction / Destruction]
+// ============================================================================
+
+CodeHolder::CodeHolder() noexcept
+ : _codeInfo(),
+ _emitterOptions(0),
+ _logger(nullptr),
+ _errorHandler(nullptr),
+ _zone(16384 - Zone::kBlockOverhead),
+ _allocator(&_zone),
+ _unresolvedLinkCount(0),
+ _addressTableSection(nullptr) {}
+
+CodeHolder::~CodeHolder() noexcept {
+ CodeHolder_resetInternal(this, Globals::kResetHard);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Init / Reset]
+// ============================================================================
+
+inline void CodeHolder_setSectionDefaultName(
+ Section* section,
+ char c0 = 0, char c1 = 0, char c2 = 0, char c3 = 0,
+ char c4 = 0, char c5 = 0, char c6 = 0, char c7 = 0) noexcept {
+
+ section->_name.u32[0] = Support::bytepack32_4x8(uint8_t(c0), uint8_t(c1), uint8_t(c2), uint8_t(c3));
+ section->_name.u32[1] = Support::bytepack32_4x8(uint8_t(c4), uint8_t(c5), uint8_t(c6), uint8_t(c7));
+}
+
+Error CodeHolder::init(const CodeInfo& info) noexcept {
+  // Cannot reinitialize if it's locked or if one or more emitters are attached.
+ if (isInitialized())
+ return DebugUtils::errored(kErrorAlreadyInitialized);
+
+ // If we are just initializing there should be no emitters attached.
+ ASMJIT_ASSERT(_emitters.empty());
+
+ // Create the default section and insert it to the `_sections` array.
+ Error err = _sections.willGrow(&_allocator);
+ if (err == kErrorOk) {
+ Section* section = _allocator.allocZeroedT<Section>();
+ if (ASMJIT_LIKELY(section)) {
+ section->_flags = Section::kFlagExec | Section::kFlagConst;
+ CodeHolder_setSectionDefaultName(section, '.', 't', 'e', 'x', 't');
+ _sections.appendUnsafe(section);
+ }
+ else {
+ err = DebugUtils::errored(kErrorOutOfMemory);
+ }
+ }
+
+ if (ASMJIT_UNLIKELY(err)) {
+ _zone.reset();
+ return err;
+ }
+ else {
+ _codeInfo = info;
+ return kErrorOk;
+ }
+}
+
+void CodeHolder::reset(uint32_t resetPolicy) noexcept {
+ CodeHolder_resetInternal(this, resetPolicy);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Attach / Detach]
+// ============================================================================
+
+Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
+ // Catch a possible misuse of the API.
+ if (ASMJIT_UNLIKELY(!emitter))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Invalid emitter, this should not be possible.
+ uint32_t type = emitter->emitterType();
+ if (ASMJIT_UNLIKELY(type == BaseEmitter::kTypeNone || type >= BaseEmitter::kTypeCount))
+ return DebugUtils::errored(kErrorInvalidState);
+
+  // This is suspicious, but don't fail if `emitter` is already attached
+  // to this code holder. This is not an error, but it's not recommended.
+ if (emitter->_code != nullptr) {
+ if (emitter->_code == this)
+ return kErrorOk;
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+  // Reserve the space now as we cannot fail after `onAttach()` has succeeded.
+ ASMJIT_PROPAGATE(_emitters.willGrow(&_allocator, 1));
+ ASMJIT_PROPAGATE(emitter->onAttach(this));
+
+ // Connect CodeHolder <-> BaseEmitter.
+ ASMJIT_ASSERT(emitter->_code == this);
+ _emitters.appendUnsafe(emitter);
+
+ return kErrorOk;
+}
+
+Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
+ if (ASMJIT_UNLIKELY(!emitter))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(emitter->_code != this))
+ return DebugUtils::errored(kErrorInvalidState);
+
+  // NOTE: We always detach if asked to. If an error happens during
+  // `emitter->onDetach()` we just propagate it, but the BaseEmitter will
+  // still be detached.
+ Error err = kErrorOk;
+ if (!emitter->isDestroyed())
+ err = emitter->onDetach(this);
+
+ // Disconnect CodeHolder <-> BaseEmitter.
+ uint32_t index = _emitters.indexOf(emitter);
+ ASMJIT_ASSERT(index != Globals::kNotFound);
+
+ _emitters.removeAt(index);
+ emitter->_code = nullptr;
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Emitter Options]
+// ============================================================================
+
+static constexpr uint32_t kEmitterOptionsFilter = ~uint32_t(BaseEmitter::kOptionLoggingEnabled);
+
+void CodeHolder::addEmitterOptions(uint32_t options) noexcept {
+ CodeHolder_modifyEmitterOptions(this, 0, options & kEmitterOptionsFilter);
+}
+
+void CodeHolder::clearEmitterOptions(uint32_t options) noexcept {
+ CodeHolder_modifyEmitterOptions(this, options & kEmitterOptionsFilter, 0);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Logging & Error Handling]
+// ============================================================================
+
+void CodeHolder::setLogger(Logger* logger) noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ _logger = logger;
+ uint32_t option = !logger ? uint32_t(0) : uint32_t(BaseEmitter::kOptionLoggingEnabled);
+ CodeHolder_modifyEmitterOptions(this, BaseEmitter::kOptionLoggingEnabled, option);
+#else
+ DebugUtils::unused(logger);
+#endif
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Code Buffer]
+// ============================================================================
+
+static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
+ uint8_t* oldData = cb->_data;
+ uint8_t* newData;
+
+ if (oldData && !cb->isExternal())
+ newData = static_cast<uint8_t*>(::realloc(oldData, n));
+ else
+ newData = static_cast<uint8_t*>(::malloc(n));
+
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ cb->_data = newData;
+ cb->_capacity = n;
+
+ // Update pointers used by assemblers, if attached.
+ for (BaseEmitter* emitter : self->emitters()) {
+ if (emitter->isAssembler()) {
+ BaseAssembler* a = static_cast<BaseAssembler*>(emitter);
+ if (&a->_section->_buffer == cb) {
+ size_t offset = a->offset();
+
+ a->_bufferData = newData;
+ a->_bufferEnd = newData + n;
+ a->_bufferPtr = newData + offset;
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::growBuffer(CodeBuffer* cb, size_t n) noexcept {
+ // The size of the section must be valid.
+ size_t size = cb->size();
+ if (ASMJIT_UNLIKELY(n > std::numeric_limits<uintptr_t>::max() - size))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // We can now check if growing the buffer is really necessary. It's unlikely
+ // that this function is called while there is still room for `n` bytes.
+ size_t capacity = cb->capacity();
+ size_t required = cb->size() + n;
+ if (ASMJIT_UNLIKELY(required <= capacity))
+ return kErrorOk;
+
+ if (cb->isFixed())
+ return DebugUtils::errored(kErrorTooLarge);
+
+ size_t kInitialCapacity = 8096;
+ if (capacity < kInitialCapacity)
+ capacity = kInitialCapacity;
+ else
+ capacity += Globals::kAllocOverhead;
+
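+  // Grow exponentially (doubling) while below `kGrowThreshold`, then linearly
+  // by `kGrowThreshold` per iteration, until `capacity - kAllocOverhead` can
+  // hold the required size.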
+ do {
+ size_t old = capacity;
+ if (capacity < Globals::kGrowThreshold)
+ capacity *= 2;
+ else
+ capacity += Globals::kGrowThreshold;
+
+ // Overflow.
+ if (ASMJIT_UNLIKELY(old > capacity))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ } while (capacity - Globals::kAllocOverhead < required);
+
+ return CodeHolder_reserveInternal(this, cb, capacity - Globals::kAllocOverhead);
+}
+
+Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
+ size_t capacity = cb->capacity();
+ if (n <= capacity) return kErrorOk;
+
+ if (cb->isFixed())
+ return DebugUtils::errored(kErrorTooLarge);
+
+ return CodeHolder_reserveInternal(this, cb, n);
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Sections]
+// ============================================================================
+
+Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, uint32_t flags, uint32_t alignment) noexcept {
+ *sectionOut = nullptr;
+
+ if (nameSize == SIZE_MAX)
+ nameSize = strlen(name);
+
+ if (alignment == 0)
+ alignment = 1;
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment)))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxSectionNameSize))
+ return DebugUtils::errored(kErrorInvalidSectionName);
+
+ uint32_t sectionId = _sections.size();
+ if (ASMJIT_UNLIKELY(sectionId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManySections);
+
+ ASMJIT_PROPAGATE(_sections.willGrow(&_allocator));
+ Section* section = _allocator.allocZeroedT<Section>();
+
+ if (ASMJIT_UNLIKELY(!section))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ section->_id = sectionId;
+ section->_flags = flags;
+ section->_alignment = alignment;
+ memcpy(section->_name.str, name, nameSize);
+ _sections.appendUnsafe(section);
+
+ *sectionOut = section;
+ return kErrorOk;
+}
+
+Section* CodeHolder::sectionByName(const char* name, size_t nameSize) const noexcept {
+ if (nameSize == SIZE_MAX)
+ nameSize = strlen(name);
+
+  // This could also be put in a hash-table, similar to what we do with labels;
+  // however, it's questionable as the number of sections should be pretty low
+  // in general. Create an issue if this becomes a problem.
+  if (ASMJIT_LIKELY(nameSize <= Globals::kMaxSectionNameSize)) {
+ for (Section* section : _sections)
+ if (memcmp(section->_name.str, name, nameSize) == 0 && section->_name.str[nameSize] == '\0')
+ return section;
+ }
+
+ return nullptr;
+}
+
+Section* CodeHolder::ensureAddressTableSection() noexcept {
+ if (_addressTableSection)
+ return _addressTableSection;
+
+ newSection(&_addressTableSection, CodeHolder_addrTabName, sizeof(CodeHolder_addrTabName) - 1, 0, _codeInfo.gpSize());
+ return _addressTableSection;
+}
+
+Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
+ AddressTableEntry* entry = _addressTableEntries.get(address);
+ if (entry)
+ return kErrorOk;
+
+ Section* section = ensureAddressTableSection();
+ if (ASMJIT_UNLIKELY(!section))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ entry = _zone.newT<AddressTableEntry>(address);
+ if (ASMJIT_UNLIKELY(!entry))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _addressTableEntries.insert(entry);
+ section->_virtualSize += _codeInfo.gpSize();
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Labels / Symbols]
+// ============================================================================
+
+//! Only used to look up a label from `_namedLabels`.
+class LabelByName {
+public:
+ inline LabelByName(const char* key, size_t keySize, uint32_t hashCode) noexcept
+ : _key(key),
+ _keySize(uint32_t(keySize)),
+ _hashCode(hashCode) {}
+
+ inline uint32_t hashCode() const noexcept { return _hashCode; }
+
+ inline bool matches(const LabelEntry* entry) const noexcept {
+ return entry->nameSize() == _keySize && ::memcmp(entry->name(), _key, _keySize) == 0;
+ }
+
+ const char* _key;
+ uint32_t _keySize;
+ uint32_t _hashCode;
+};
+
+// Returns a hash of `name` and fixes `nameSize` if it's `SIZE_MAX`.
+static uint32_t CodeHolder_hashNameAndGetSize(const char* name, size_t& nameSize) noexcept {
+ uint32_t hashCode = 0;
+ if (nameSize == SIZE_MAX) {
+ size_t i = 0;
+ for (;;) {
+ uint8_t c = uint8_t(name[i]);
+ if (!c) break;
+ hashCode = Support::hashRound(hashCode, c);
+ i++;
+ }
+ nameSize = i;
+ }
+ else {
+ for (size_t i = 0; i < nameSize; i++) {
+ uint8_t c = uint8_t(name[i]);
+ if (ASMJIT_UNLIKELY(!c)) return DebugUtils::errored(kErrorInvalidLabelName);
+ hashCode = Support::hashRound(hashCode, c);
+ }
+ }
+ return hashCode;
+}
+
+static bool CodeHolder_writeDisplacement(void* dst, int64_t displacement, uint32_t displacementSize) noexcept {
+ if (displacementSize == 4 && Support::isInt32(displacement)) {
+ Support::writeI32uLE(dst, int32_t(displacement));
+ return true;
+ }
+ else if (displacementSize == 1 && Support::isInt8(displacement)) {
+ Support::writeI8(dst, int8_t(displacement));
+ return true;
+ }
+
+ return false;
+}
+
+LabelLink* CodeHolder::newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept {
+ LabelLink* link = _allocator.allocT<LabelLink>();
+ if (ASMJIT_UNLIKELY(!link)) return nullptr;
+
+ link->next = le->_links;
+ le->_links = link;
+
+ link->sectionId = sectionId;
+ link->relocId = Globals::kInvalidId;
+ link->offset = offset;
+ link->rel = rel;
+
+ _unresolvedLinkCount++;
+ return link;
+}
+
+Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
+ *entryOut = 0;
+
+ uint32_t labelId = _labelEntries.size();
+ if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyLabels);
+
+ ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
+ LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
+
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ le->_setId(labelId);
+ le->_parentId = Globals::kInvalidId;
+ le->_offset = 0;
+ _labelEntries.appendUnsafe(le);
+
+ *entryOut = le;
+ return kErrorOk;
+}
+
+Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId) noexcept {
+ *entryOut = 0;
+ uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
+
+ if (ASMJIT_UNLIKELY(nameSize == 0))
+ return DebugUtils::errored(kErrorInvalidLabelName);
+
+ if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
+ return DebugUtils::errored(kErrorLabelNameTooLong);
+
+ switch (type) {
+ case Label::kTypeLocal:
+ if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size()))
+ return DebugUtils::errored(kErrorInvalidParentLabel);
+
+ hashCode ^= parentId;
+ break;
+
+ case Label::kTypeGlobal:
+ if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
+ return DebugUtils::errored(kErrorNonLocalLabelCantHaveParent);
+
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+
+  // Don't allow duplicates to be inserted. Local labels of the same name are
+  // allowed as long as they have different parent labels; this is already
+  // accomplished by XOR'ing the parent id into the name hash, so identical
+  // names with different parents produce different hash codes.
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode));
+ if (ASMJIT_UNLIKELY(le))
+ return DebugUtils::errored(kErrorLabelAlreadyDefined);
+
+ Error err = kErrorOk;
+ uint32_t labelId = _labelEntries.size();
+
+ if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyLabels);
+
+ ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
+ le = _allocator.allocZeroedT<LabelEntry>();
+
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ le->_hashCode = hashCode;
+ le->_setId(labelId);
+ le->_type = uint8_t(type);
+ le->_parentId = Globals::kInvalidId;
+ le->_offset = 0;
+ ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize));
+
+ _labelEntries.appendUnsafe(le);
+ _namedLabels.insert(allocator(), le);
+
+ *entryOut = le;
+ return err;
+}
+
+uint32_t CodeHolder::labelIdByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
+ // TODO: Finalize - parent id is not used here?
+ DebugUtils::unused(parentId);
+
+ uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
+  if (ASMJIT_UNLIKELY(!nameSize)) return uint32_t(Globals::kInvalidId);
+
+ LabelEntry* le = _namedLabels.get(LabelByName(name, nameSize, hashCode));
+ return le ? le->id() : uint32_t(Globals::kInvalidId);
+}
+
+ASMJIT_API Error CodeHolder::resolveUnresolvedLinks() noexcept {
+ if (!hasUnresolvedLinks())
+ return kErrorOk;
+
+ Error err = kErrorOk;
+ for (LabelEntry* le : labelEntries()) {
+ if (!le->isBound())
+ continue;
+
+ LabelLinkIterator link(le);
+ if (link) {
+ Support::FastUInt8 of = 0;
+ Section* toSection = le->section();
+ uint64_t toOffset = Support::addOverflow(toSection->offset(), le->offset(), &of);
+
+ do {
+ uint32_t linkSectionId = link->sectionId;
+ if (link->relocId == Globals::kInvalidId) {
+ Section* fromSection = sectionById(linkSectionId);
+ size_t linkOffset = link->offset;
+
+ CodeBuffer& buf = _sections[linkSectionId]->buffer();
+ ASMJIT_ASSERT(linkOffset < buf.size());
+
+ // Calculate the offset relative to the start of the virtual base.
+ uint64_t fromOffset = Support::addOverflow<uint64_t>(fromSection->offset(), linkOffset, &of);
+ int64_t displacement = int64_t(toOffset - fromOffset + uint64_t(int64_t(link->rel)));
+
+ if (!of) {
+ ASMJIT_ASSERT(size_t(linkOffset) < buf.size());
+
+ // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
+ uint32_t displacementSize = buf._data[linkOffset];
+ ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize);
+
+ // Overwrite a real displacement in the CodeBuffer.
+ if (CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) {
+ link.resolveAndNext(this);
+ continue;
+ }
+ }
+
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ // Falls through to `link.next()`.
+ }
+
+ link.next();
+ } while (link);
+ }
+ }
+
+ return err;
+}
+
+ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId, uint64_t toOffset) noexcept {
+ LabelEntry* le = labelEntry(label);
+ if (ASMJIT_UNLIKELY(!le))
+ return DebugUtils::errored(kErrorInvalidLabel);
+
+  if (ASMJIT_UNLIKELY(toSectionId >= _sections.size()))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ // Label can be bound only once.
+ if (ASMJIT_UNLIKELY(le->isBound()))
+ return DebugUtils::errored(kErrorLabelAlreadyBound);
+
+ // Bind the label.
+ Section* section = _sections[toSectionId];
+ le->_section = section;
+ le->_offset = toOffset;
+
+ Error err = kErrorOk;
+ CodeBuffer& buf = section->buffer();
+
+  // Fix all links to this label that we have collected so far if they are
+  // within the same section. We ignore any inter-section links as these have
+  // to be fixed later.
+ LabelLinkIterator link(le);
+ while (link) {
+ uint32_t linkSectionId = link->sectionId;
+ size_t linkOffset = link->offset;
+
+ uint32_t relocId = link->relocId;
+ if (relocId != Globals::kInvalidId) {
+ // Adjust relocation data only.
+ RelocEntry* re = _relocations[relocId];
+ re->_payload += toOffset;
+ re->_targetSectionId = toSectionId;
+ }
+ else {
+ if (linkSectionId != toSectionId) {
+ link.next();
+ continue;
+ }
+
+ ASMJIT_ASSERT(linkOffset < buf.size());
+ int64_t displacement = int64_t(toOffset - uint64_t(linkOffset) + uint64_t(int64_t(link->rel)));
+
+ // Size of the value we are going to patch. Only BYTE/DWORD is allowed.
+ uint32_t displacementSize = buf._data[linkOffset];
+ ASMJIT_ASSERT(buf.size() - size_t(linkOffset) >= displacementSize);
+
+ // Overwrite a real displacement in the CodeBuffer.
+ if (!CodeHolder_writeDisplacement(buf._data + linkOffset, displacement, displacementSize)) {
+ err = DebugUtils::errored(kErrorInvalidDisplacement);
+ link.next();
+ continue;
+ }
+ }
+
+ link.resolveAndNext(this);
+ }
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Relocations]
+// ============================================================================
+
+Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept {
+ ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));
+
+ uint32_t relocId = _relocations.size();
+ if (ASMJIT_UNLIKELY(relocId == Globals::kInvalidId))
+ return DebugUtils::errored(kErrorTooManyRelocations);
+
+ RelocEntry* re = _allocator.allocZeroedT<RelocEntry>();
+ if (ASMJIT_UNLIKELY(!re))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ re->_id = relocId;
+ re->_relocType = uint8_t(relocType);
+ re->_valueSize = uint8_t(valueSize);
+ re->_sourceSectionId = Globals::kInvalidId;
+ re->_targetSectionId = Globals::kInvalidId;
+ _relocations.appendUnsafe(re);
+
+ *dst = re;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Expression Evaluation]
+// ============================================================================
+
+static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, uint64_t* out) noexcept {
+ uint64_t value[2];
+ for (size_t i = 0; i < 2; i++) {
+ uint64_t v;
+ switch (exp->valueType[i]) {
+ case Expression::kValueNone: {
+ v = 0;
+ break;
+ }
+
+ case Expression::kValueConstant: {
+ v = exp->value[i].constant;
+ break;
+ }
+
+ case Expression::kValueLabel: {
+ LabelEntry* le = exp->value[i].label;
+ if (!le->isBound())
+ return DebugUtils::errored(kErrorExpressionLabelNotBound);
+ v = le->section()->offset() + le->offset();
+ break;
+ }
+
+ case Expression::kValueExpression: {
+ Expression* nested = exp->value[i].expression;
+ ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(self, nested, &v));
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ value[i] = v;
+ }
+
+ uint64_t result;
+ uint64_t& a = value[0];
+ uint64_t& b = value[1];
+
+ switch (exp->opType) {
+ case Expression::kOpAdd:
+ result = a + b;
+ break;
+
+ case Expression::kOpSub:
+ result = a - b;
+ break;
+
+ case Expression::kOpMul:
+ result = a * b;
+ break;
+
+ case Expression::kOpSll:
+ result = (b > 63) ? uint64_t(0) : uint64_t(a << b);
+ break;
+
+ case Expression::kOpSrl:
+ result = (b > 63) ? uint64_t(0) : uint64_t(a >> b);
+ break;
+
+ case Expression::kOpSra:
+ result = Support::sar(a, Support::min<uint64_t>(b, 63));
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ *out = result;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::CodeHolder - Utilities]
+// ============================================================================
+
+Error CodeHolder::flatten() noexcept {
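+  // First pass: verify that the aligned section offsets cannot overflow, so
+  // the second pass (which actually assigns the offsets) cannot fail midway.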
+ uint64_t offset = 0;
+ for (Section* section : _sections) {
+ uint64_t realSize = section->realSize();
+ if (realSize) {
+ uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
+ if (ASMJIT_UNLIKELY(alignedOffset < offset))
+ return DebugUtils::errored(kErrorTooLarge);
+
+ Support::FastUInt8 of = 0;
+ offset = Support::addOverflow(alignedOffset, realSize, &of);
+
+ if (ASMJIT_UNLIKELY(of))
+ return DebugUtils::errored(kErrorTooLarge);
+ }
+ }
+
+ // Now we know that we can assign offsets of all sections properly.
+ Section* prev = nullptr;
+ offset = 0;
+ for (Section* section : _sections) {
+ uint64_t realSize = section->realSize();
+ if (realSize)
+ offset = Support::alignUp(offset, section->alignment());
+ section->_offset = offset;
+
+    // Make sure the previous section extends far enough to cover the alignment gap.
+ if (prev)
+ prev->_virtualSize = offset - prev->_offset;
+
+ prev = section;
+ offset += realSize;
+ }
+
+ return kErrorOk;
+}
+
+size_t CodeHolder::codeSize() const noexcept {
+ Support::FastUInt8 of = 0;
+ uint64_t offset = 0;
+
+ for (Section* section : _sections) {
+ uint64_t realSize = section->realSize();
+
+ if (realSize) {
+ uint64_t alignedOffset = Support::alignUp(offset, section->alignment());
+ ASMJIT_ASSERT(alignedOffset >= offset);
+ offset = Support::addOverflow(alignedOffset, realSize, &of);
+ }
+ }
+
+  // TODO: Not nice, maybe change `codeSize()` to return `uint64_t` instead?
+ if ((sizeof(uint64_t) > sizeof(size_t) && offset > SIZE_MAX) || of)
+ return SIZE_MAX;
+
+ return size_t(offset);
+}
+
+Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
+ // Base address must be provided.
+ if (ASMJIT_UNLIKELY(baseAddress == Globals::kNoBaseAddress))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ _codeInfo.setBaseAddress(baseAddress);
+ uint32_t gpSize = _codeInfo.gpSize();
+
+ Section* addressTableSection = _addressTableSection;
+ uint32_t addressTableEntryCount = 0;
+ uint8_t* addressTableEntryData = nullptr;
+
+ if (addressTableSection) {
+ ASMJIT_PROPAGATE(
+ reserveBuffer(&addressTableSection->_buffer, size_t(addressTableSection->virtualSize())));
+ addressTableEntryData = addressTableSection->_buffer.data();
+ }
+
+ // Relocate all recorded locations.
+ for (const RelocEntry* re : _relocations) {
+ // Possibly deleted or optimized-out entry.
+ if (re->relocType() == RelocEntry::kTypeNone)
+ continue;
+
+ Section* sourceSection = sectionById(re->sourceSectionId());
+ Section* targetSection = nullptr;
+
+ if (re->targetSectionId() != Globals::kInvalidId)
+ targetSection = sectionById(re->targetSectionId());
+
+ uint64_t value = re->payload();
+ uint64_t sectionOffset = sourceSection->offset();
+ uint64_t sourceOffset = re->sourceOffset();
+
+ // Make sure that the `RelocEntry` doesn't go out of bounds.
+ size_t regionSize = re->leadingSize() + re->valueSize() + re->trailingSize();
+ if (ASMJIT_UNLIKELY(re->sourceOffset() >= sourceSection->bufferSize() ||
+ sourceSection->bufferSize() - size_t(re->sourceOffset()) < regionSize))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ uint8_t* buffer = sourceSection->data();
+ size_t valueOffset = size_t(re->sourceOffset()) + re->leadingSize();
+
+ switch (re->relocType()) {
+ case RelocEntry::kTypeExpression: {
+ Expression* expression = (Expression*)(uintptr_t(value));
+ ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(this, expression, &value));
+ break;
+ }
+
+ case RelocEntry::kTypeAbsToAbs: {
+ break;
+ }
+
+ case RelocEntry::kTypeRelToAbs: {
+ // Value is currently a relative offset from the start of its section.
+ // We have to convert it to an absolute offset (including base address).
+ if (ASMJIT_UNLIKELY(!targetSection))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ //value += baseAddress + sectionOffset + sourceOffset + regionSize;
+ value += baseAddress + targetSection->offset();
+ break;
+ }
+
+ case RelocEntry::kTypeAbsToRel: {
+ value -= baseAddress + sectionOffset + sourceOffset + regionSize;
+ if (gpSize > 4 && !Support::isInt32(int64_t(value)))
+ return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
+ break;
+ }
+
+ case RelocEntry::kTypeX64AddressEntry: {
+ if (re->valueSize() != 4 || re->leadingSize() < 2)
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ // First try whether a relative 32-bit displacement would work.
+ value -= baseAddress + sectionOffset + sourceOffset + regionSize;
+ if (!Support::isInt32(int64_t(value))) {
+ // Relative 32-bit displacement is not possible, use '.addrtab' section.
+ AddressTableEntry* atEntry = _addressTableEntries.get(re->payload());
+ if (ASMJIT_UNLIKELY(!atEntry))
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+
+ // Cannot be null as we have just matched the `AddressTableEntry`.
+ ASMJIT_ASSERT(addressTableSection != nullptr);
+
+ if (!atEntry->hasAssignedSlot())
+ atEntry->_slot = addressTableEntryCount++;
+
+ size_t atEntryIndex = size_t(atEntry->slot()) * gpSize;
+ uint64_t addrSrc = sectionOffset + sourceOffset + regionSize;
+ uint64_t addrDst = addressTableSection->offset() + uint64_t(atEntryIndex);
+
+ value = addrDst - addrSrc;
+ if (!Support::isInt32(int64_t(value)))
+ return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
+
+ // Bytes that replace [REX, OPCODE] bytes.
+ uint32_t byte0 = 0xFF;
+ uint32_t byte1 = buffer[valueOffset - 1];
+
+ if (byte1 == 0xE8) {
+ // Patch CALL/MOD byte to FF /2 (-> 0x15).
+ byte1 = x86EncodeMod(0, 2, 5);
+ }
+ else if (byte1 == 0xE9) {
+ // Patch JMP/MOD byte to FF /4 (-> 0x25).
+ byte1 = x86EncodeMod(0, 4, 5);
+ }
+ else {
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+
+ // Patch `jmp/call` instruction.
+ buffer[valueOffset - 2] = uint8_t(byte0);
+ buffer[valueOffset - 1] = uint8_t(byte1);
+
+ Support::writeU64uLE(addressTableEntryData + atEntryIndex, re->payload());
+ }
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+
+ switch (re->valueSize()) {
+ case 1:
+ Support::writeU8(buffer + valueOffset, uint32_t(value & 0xFFu));
+ break;
+
+ case 2:
+ Support::writeU16uLE(buffer + valueOffset, uint32_t(value & 0xFFFFu));
+ break;
+
+ case 4:
+ Support::writeU32uLE(buffer + valueOffset, uint32_t(value & 0xFFFFFFFFu));
+ break;
+
+ case 8:
+ Support::writeU64uLE(buffer + valueOffset, value);
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidRelocEntry);
+ }
+ }
+
+ // Fixup the virtual size of the address table if it's the last section.
+ if (_sections.last() == addressTableSection) {
+ size_t addressTableSize = addressTableEntryCount * gpSize;
+ addressTableSection->_buffer._size = addressTableSize;
+ addressTableSection->_virtualSize = addressTableSize;
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t options) noexcept {
+ if (ASMJIT_UNLIKELY(!isSectionValid(sectionId)))
+ return DebugUtils::errored(kErrorInvalidSection);
+
+ Section* section = sectionById(sectionId);
+ size_t bufferSize = section->bufferSize();
+
+ if (ASMJIT_UNLIKELY(dstSize < bufferSize))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ memcpy(dst, section->data(), bufferSize);
+
+ if (bufferSize < dstSize && (options & kCopyWithPadding)) {
+ size_t paddingSize = dstSize - bufferSize;
+ memset(static_cast<uint8_t*>(dst) + bufferSize, 0, paddingSize);
+ }
+
+ return kErrorOk;
+}
+
+Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t options) noexcept {
+ size_t end = 0;
+ for (Section* section : _sections) {
+ if (section->offset() > dstSize)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ size_t bufferSize = section->bufferSize();
+ size_t offset = size_t(section->offset());
+
+ if (ASMJIT_UNLIKELY(dstSize - offset < bufferSize))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ uint8_t* dstTarget = static_cast<uint8_t*>(dst) + offset;
+ size_t paddingSize = 0;
+ memcpy(dstTarget, section->data(), bufferSize);
+
+ if ((options & kCopyWithPadding) && bufferSize < section->virtualSize()) {
+ paddingSize = Support::min<size_t>(dstSize - offset, size_t(section->virtualSize())) - bufferSize;
+ memset(dstTarget + bufferSize, 0, paddingSize);
+ }
+
+ end = Support::max(end, offset + bufferSize + paddingSize);
+ }
+
+  // TODO: `end` is not used atm; we need an option to also pad anything beyond
+  // the code in case the destination is much larger (for example page-aligned).
+
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/codeholder.h b/3rdparty/asmjit/src/asmjit/core/codeholder.h
new file mode 100644
index 00000000000..5f6a21d6241
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/codeholder.h
@@ -0,0 +1,930 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+#define ASMJIT_CORE_CODEHOLDER_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/datatypes.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/target.h"
+#include "../core/zone.h"
+#include "../core/zonehash.h"
+#include "../core/zonestring.h"
+#include "../core/zonetree.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+class CodeHolder;
+class LabelEntry;
+class Logger;
+
+// ============================================================================
+// [asmjit::AlignMode]
+// ============================================================================
+
+//! Align mode.
+enum AlignMode : uint32_t {
+ kAlignCode = 0, //!< Align executable code.
+  kAlignData = 1, //!< Align non-executable data.
+ kAlignZero = 2, //!< Align by a sequence of zeros.
+ kAlignCount = 3 //!< Count of alignment modes.
+};
+
+// ============================================================================
+// [asmjit::ErrorHandler]
+// ============================================================================
+
+//! An error handler can be used to override the default error handling
+//! behavior available to all classes that inherit `BaseEmitter`.
+//!
+//! Override `ErrorHandler::handleError()` to implement your own error handler.
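+//!
+//! A minimal sketch (the `SimpleErrorHandler` name and the `printf()`
+//! reporting are illustrative, not part of AsmJit):
+//!
+//! ```
+//! class SimpleErrorHandler : public ErrorHandler {
+//! public:
+//!   void handleError(Error err, const char* message, BaseEmitter* origin) override {
+//!     DebugUtils::unused(origin);
+//!     printf("AsmJit error %u: %s\n", unsigned(err), message);
+//!   }
+//! };
+//! ```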
+class ASMJIT_VIRTAPI ErrorHandler {
+public:
+ ASMJIT_BASE_CLASS(ErrorHandler)
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Creates a new `ErrorHandler` instance.
+ ASMJIT_API ErrorHandler() noexcept;
+ //! Destroys the `ErrorHandler` instance.
+ ASMJIT_API virtual ~ErrorHandler() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Handle Error]
+ // --------------------------------------------------------------------------
+
+ //! Error handler (must be reimplemented).
+ //!
+  //! The error handler is called after an error happens and before it's
+  //! propagated to the caller. There are multiple ways the error handler
+  //! can be used:
+ //!
+  //! 1. User-based error handling without throwing an exception or using C's
+  //!    `longjmp()`. This is for users that don't use exceptions and want
+  //!    customized error handling.
+ //!
+  //! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
+  //!    exception-safe, but you can throw an exception from your error handler
+  //!    if this is the preferred way of handling errors in your project.
+ //!
+  //! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
+  //!    `BaseEmitter` into a consistent state before calling `handleError()`,
+  //!    so `longjmp()` can be used without any issues to cancel the code
+  //!    generation if an error occurred. From AsmJit's perspective there is
+  //!    no difference between exceptions and `longjmp()`; however, never
+  //!    jump outside of the `CodeHolder` and `BaseEmitter` scope as that
+  //!    would leak memory.
+ virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
+};
+
+// ============================================================================
+// [asmjit::CodeBuffer]
+// ============================================================================
+
+//! Code or data buffer.
+struct CodeBuffer {
+ //! The content of the buffer (data).
+ uint8_t* _data;
+ //! Number of bytes of `data` used.
+ size_t _size;
+ //! Buffer capacity (in bytes).
+ size_t _capacity;
+ //! Buffer flags.
+ uint32_t _flags;
+
+ enum Flags : uint32_t {
+ //! Buffer is external (not allocated by asmjit).
+ kFlagIsExternal = 0x00000001u,
+ //! Buffer is fixed (cannot be reallocated).
+ kFlagIsFixed = 0x00000002u
+ };
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline uint8_t& operator[](size_t index) noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+
+ inline const uint8_t& operator[](size_t index) const noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return _data[index];
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ inline bool isAllocated() const noexcept { return _data != nullptr; }
+ inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
+ inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
+
+ inline uint8_t* data() noexcept { return _data; }
+ inline const uint8_t* data() const noexcept { return _data; }
+
+ inline bool empty() const noexcept { return !_size; }
+ inline size_t size() const noexcept { return _size; }
+ inline size_t capacity() const noexcept { return _capacity; }
+
+ //! \}
+
+ //! \name Iterators
+ //! \{
+
+ inline uint8_t* begin() noexcept { return _data; }
+ inline const uint8_t* begin() const noexcept { return _data; }
+
+ inline uint8_t* end() noexcept { return _data + _size; }
+ inline const uint8_t* end() const noexcept { return _data + _size; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Section]
+// ============================================================================
+
+//! Section entry.
+class Section {
+public:
+ //! Section id.
+ uint32_t _id;
+ //! Section flags.
+ uint32_t _flags;
+ //! Section alignment requirements (0 if no requirements).
+ uint32_t _alignment;
+ //! Reserved for future use (padding).
+ uint32_t _reserved;
+ //! Offset of this section from base-address.
+ uint64_t _offset;
+ //! Virtual size of the section (zero initialized sections).
+ uint64_t _virtualSize;
+ //! Section name (max 35 characters, PE allows max 8).
+ FixedString<Globals::kMaxSectionNameSize + 1> _name;
+ //! Code or data buffer.
+ CodeBuffer _buffer;
+
+ //! Section flags.
+ enum Flags : uint32_t {
+ kFlagExec = 0x00000001u, //!< Executable (.text sections).
+ kFlagConst = 0x00000002u, //!< Read-only (.text and .data sections).
+ kFlagZero = 0x00000004u, //!< Zero initialized by the loader (BSS).
+ kFlagInfo = 0x00000008u, //!< Info / comment flag.
+ kFlagImplicit = 0x80000000u //!< Section created implicitly and can be deleted by `Target`.
+ };
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t id() const noexcept { return _id; }
+ inline const char* name() const noexcept { return _name.str; }
+
+ inline uint8_t* data() noexcept { return _buffer.data(); }
+ inline const uint8_t* data() const noexcept { return _buffer.data(); }
+
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ inline uint32_t alignment() const noexcept { return _alignment; }
+ inline void setAlignment(uint32_t alignment) noexcept { _alignment = alignment; }
+
+ inline uint64_t offset() const noexcept { return _offset; }
+ inline void setOffset(uint64_t offset) noexcept { _offset = offset; }
+
+ //! Returns the virtual size of the section.
+ //!
+  //! The virtual size is initially zero and is never changed by AsmJit. It's
+  //! normal if the virtual size is smaller than the size returned by
+  //! `bufferSize()`, as the buffer stores the real data emitted by assemblers
+  //! or appended by users.
+ //!
+ //! Use `realSize()` to get the real and final size of this section.
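+  //!
+  //! For example, a zero-initialized (BSS-like) section may have
+  //! `bufferSize() == 0` and `virtualSize() == 4096`; `realSize()` then
+  //! returns 4096, the maximum of the two.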
+ inline uint64_t virtualSize() const noexcept { return _virtualSize; }
+ //! Sets the virtual size of the section.
+ inline void setVirtualSize(uint64_t virtualSize) noexcept { _virtualSize = virtualSize; }
+
+ //! Returns the buffer size of the section.
+ inline size_t bufferSize() const noexcept { return _buffer.size(); }
+ //! Returns the real size of the section calculated from virtual and buffer sizes.
+ inline uint64_t realSize() const noexcept { return Support::max<uint64_t>(virtualSize(), bufferSize()); }
+
+ //! Returns the `CodeBuffer` used by this section.
+ inline CodeBuffer& buffer() noexcept { return _buffer; }
+ //! Returns the `CodeBuffer` used by this section (const).
+ inline const CodeBuffer& buffer() const noexcept { return _buffer; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LabelLink]
+// ============================================================================
+
+//! Data structure used to record uses of unbound labels and cross-section links.
+struct LabelLink {
+ //! Next link (single-linked list).
+ LabelLink* next;
+  //! Section id of the code location that refers to the label (the location to patch).
+ uint32_t sectionId;
+ //! Relocation id or Globals::kInvalidId.
+ uint32_t relocId;
+  //! Offset of the patched location relative to the start of the section.
+ size_t offset;
+ //! Inlined rel8/rel32.
+ intptr_t rel;
+};
+
+// ============================================================================
+// [asmjit::Expression]
+// ============================================================================
+
+struct Expression {
+ enum OpType : uint8_t {
+ kOpAdd = 0,
+ kOpSub = 1,
+ kOpMul = 2,
+ kOpSll = 3,
+ kOpSrl = 4,
+ kOpSra = 5
+ };
+
+ enum ValueType : uint8_t {
+ kValueNone = 0,
+ kValueConstant = 1,
+ kValueLabel = 2,
+ kValueExpression = 3
+ };
+
+ union Value {
+ uint64_t constant;
+ Expression* expression;
+ LabelEntry* label;
+ };
+
+ uint8_t opType;
+ uint8_t valueType[2];
+ uint8_t reserved[5];
+ Value value[2];
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ inline void setValueAsConstant(size_t index, uint64_t constant) noexcept {
+ valueType[index] = kValueConstant;
+ value[index].constant = constant;
+ }
+
+ inline void setValueAsLabel(size_t index, LabelEntry* label) noexcept {
+ valueType[index] = kValueLabel;
+ value[index].label = label;
+ }
+
+ inline void setValueAsExpression(size_t index, Expression* expression) noexcept {
+    valueType[index] = kValueExpression;
+ value[index].expression = expression;
+ }
+};
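+
+// A minimal sketch of building the expression `label + 16` with the setters
+// above (`exp` and `le` are illustrative locals, not part of the API):
+//
+//   Expression exp;
+//   exp.reset();
+//   exp.opType = Expression::kOpAdd;
+//   exp.setValueAsLabel(0, le);     // `le` is a LabelEntry*.
+//   exp.setValueAsConstant(1, 16);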
+
+// ============================================================================
+// [asmjit::LabelEntry]
+// ============================================================================
+
+//! Label entry.
+//!
+//! Contains the following properties:
+//! * Label id - This is the only thing that is set to the `Label` operand.
+//! * Label name - Optional, used mostly to create executables and libraries.
+//! * Label type - Type of the label, default `Label::kTypeAnonymous`.
+//! * Label parent id - Derived from many assemblers that allow defining a
+//!   local label that falls under a global label. This allows defining many
+//!   labels of the same name as long as they have different parent (global)
+//!   labels.
+//! * Offset - offset of the label bound by `Assembler`.
+//! * Links - single-linked list that contains locations of code that has
+//! to be patched when the label gets bound. Every use of unbound label
+//! adds one link to `_links` list.
+//! * HVal - Hash value of label's name and optionally parentId.
+//! * HashNext - Hash-table implementation detail.
+class LabelEntry : public ZoneHashNode {
+public:
+  // Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has
+  // a granularity of 32 bytes anyway). This gives `_name` the remaining space,
+  // which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
+ static constexpr uint32_t kStaticNameSize =
+ 64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*));
+
+ //! Label type, see `Label::LabelType`.
+ uint8_t _type;
+ //! Must be zero.
+ uint8_t _flags;
+ //! Reserved.
+ uint16_t _reserved16;
+ //! Label parent id or zero.
+ uint32_t _parentId;
+ //! Label offset relative to the start of the `_section`.
+ uint64_t _offset;
+ //! Section where the label was bound.
+ Section* _section;
+ //! Label links.
+ LabelLink* _links;
+ //! Label name.
+ ZoneString<kStaticNameSize> _name;
+
+ //! \name Accessors
+ //! \{
+
+  // NOTE: Label id is stored in `_customData`, which is provided by ZoneHashNode
+  // to fill padding that a C++ compiler targeting a 64-bit CPU will add to align
+  // the structure to 64 bits.
+
+ //! Returns label id.
+ inline uint32_t id() const noexcept { return _customData; }
+ //! Sets label id (internal, used only by `CodeHolder`).
+ inline void _setId(uint32_t id) noexcept { _customData = id; }
+
+ //! Returns label type, see `Label::LabelType`.
+ inline uint32_t type() const noexcept { return _type; }
+ //! Returns label flags, returns 0 at the moment.
+ inline uint32_t flags() const noexcept { return _flags; }
+
+ //! Tests whether the label has a parent label.
+ inline bool hasParent() const noexcept { return _parentId != Globals::kInvalidId; }
+ //! Returns label's parent id.
+ inline uint32_t parentId() const noexcept { return _parentId; }
+
+ //! Returns the section where the label was bound.
+ //!
+ //! If the label was not yet bound the return value is `nullptr`.
+ inline Section* section() const noexcept { return _section; }
+
+ //! Tests whether the label has name.
+  //! Tests whether the label has a name.
+
+ //! Returns the label's name.
+ //!
+ //! \note Local labels will return their local name without their parent
+ //! part, for example ".L1".
+ inline const char* name() const noexcept { return _name.data(); }
+
+ //! Returns size of label's name.
+ //!
+  //! \note The label name is always null terminated, so you can use `strlen()`
+  //! to get its size; however, the size is also cached in `LabelEntry` itself,
+  //! so calling `LabelEntry::nameSize()` is the fastest way to get it.
+ inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+ //! Returns links associated with this label.
+ inline LabelLink* links() const noexcept { return _links; }
+
+ //! Tests whether the label is bound.
+ inline bool isBound() const noexcept { return _section != nullptr; }
+  //! Tests whether the label is bound to the given `section`.
+ inline bool isBoundTo(Section* section) const noexcept { return _section == section; }
+
+ //! Returns the label offset (only useful if the label is bound).
+ inline uint64_t offset() const noexcept { return _offset; }
+
+ //! Returns the hash-value of label's name and its parent label (if any).
+ //!
+ //! Label hash is calculated as `HASH(Name) ^ ParentId`. The hash function
+ //! is implemented in `Support::hashString()` and `Support::hashRound()`.
+ inline uint32_t hashCode() const noexcept { return _hashCode; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RelocEntry]
+// ============================================================================
+
+//! Relocation entry.
+//!
+//! We describe relocation data in the following way:
+//!
+//! ```
+//! +- Start of the buffer                        +- End of the data
+//! |                           |*PATCHED*|       |  or instruction
+//! |xxxxxxxxxxxxxxxxxxxxxx|LeadSize|ValueSize|TrailSize|xxxxxxxxxxxxxxxxxxxx->
+//!                        |
+//!                        +- Source offset
+//! ```
+struct RelocEntry {
+ //! Relocation id.
+ uint32_t _id;
+ //! Type of the relocation.
+ uint8_t _relocType;
+ //! Size of the relocation data/value (1, 2, 4 or 8 bytes).
+ uint8_t _valueSize;
+ //! Number of bytes after `_sourceOffset` to reach the value to be patched.
+ uint8_t _leadingSize;
+ //! Number of bytes after `_sourceOffset + _valueSize` to reach end of the
+ //! instruction.
+ uint8_t _trailingSize;
+ //! Source section id.
+ uint32_t _sourceSectionId;
+ //! Target section id.
+ uint32_t _targetSectionId;
+ //! Source offset (relative to start of the section).
+ uint64_t _sourceOffset;
+ //! Payload (target offset, target address, expression, etc).
+ uint64_t _payload;
+
+ //! Relocation type.
+ enum RelocType : uint32_t {
+ //! None/deleted (no relocation).
+ kTypeNone = 0,
+ //! Expression evaluation, `_payload` is pointer to `Expression`.
+ kTypeExpression = 1,
+ //! Relocate absolute to absolute.
+ kTypeAbsToAbs = 2,
+ //! Relocate relative to absolute.
+ kTypeRelToAbs = 3,
+ //! Relocate absolute to relative.
+ kTypeAbsToRel = 4,
+ //! Relocate absolute to relative or use trampoline.
+ kTypeX64AddressEntry = 5
+ };
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t id() const noexcept { return _id; }
+
+ inline uint32_t relocType() const noexcept { return _relocType; }
+ inline uint32_t valueSize() const noexcept { return _valueSize; }
+
+ inline uint32_t leadingSize() const noexcept { return _leadingSize; }
+ inline uint32_t trailingSize() const noexcept { return _trailingSize; }
+
+ inline uint32_t sourceSectionId() const noexcept { return _sourceSectionId; }
+ inline uint32_t targetSectionId() const noexcept { return _targetSectionId; }
+
+ inline uint64_t sourceOffset() const noexcept { return _sourceOffset; }
+ inline uint64_t payload() const noexcept { return _payload; }
+
+ Expression* payloadAsExpression() const noexcept {
+ return reinterpret_cast<Expression*>(uintptr_t(_payload));
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::AddressTableEntry]
+// ============================================================================
+
+class AddressTableEntry : public ZoneTreeNodeT<AddressTableEntry> {
+public:
+ ASMJIT_NONCOPYABLE(AddressTableEntry)
+
+ uint64_t _address;
+ uint32_t _slot;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline explicit AddressTableEntry(uint64_t address) noexcept
+ : _address(address),
+ _slot(0xFFFFFFFFu) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint64_t address() const noexcept { return _address; }
+ inline uint32_t slot() const noexcept { return _slot; }
+
+ inline bool hasAssignedSlot() const noexcept { return _slot != 0xFFFFFFFFu; }
+
+ inline bool operator<(const AddressTableEntry& other) const noexcept { return _address < other._address; }
+ inline bool operator>(const AddressTableEntry& other) const noexcept { return _address > other._address; }
+
+ inline bool operator<(uint64_t queryAddress) const noexcept { return _address < queryAddress; }
+ inline bool operator>(uint64_t queryAddress) const noexcept { return _address > queryAddress; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::CodeHolder]
+// ============================================================================
+
+//! Contains basic information about the target architecture plus its settings,
+//! and holds code & data (including sections, labels, and relocation information).
+//! CodeHolder can store both binary and intermediate representations of assembly,
+//! which can be generated by `BaseAssembler` and/or `BaseBuilder`.
+//!
+//! \note `CodeHolder` has ability to attach an `ErrorHandler`, however, the
+//! error handler is not triggered by `CodeHolder` itself, it's only used by
+//! emitters attached to `CodeHolder`.
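+//!
+//! A minimal sketch of the typical lifecycle (assuming a `JitRuntime` named
+//! `rt`; the runtime and the omitted emitter are illustrative here):
+//!
+//! ```
+//! CodeHolder code;
+//! code.init(rt.codeInfo());  // Initialize to match the JIT environment.
+//! // ... attach an emitter and generate some code ...
+//! code.reset();              // Detach all emitters and free all memory.
+//! ```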
+class CodeHolder {
+public:
+ ASMJIT_NONCOPYABLE(CodeHolder)
+
+ //! Basic information about the code (architecture and other info).
+ CodeInfo _codeInfo;
+ //! Emitter options, propagated to all emitters when changed.
+ uint32_t _emitterOptions;
+
+ //! Attached `Logger`, used by all consumers.
+ Logger* _logger;
+ //! Attached `ErrorHandler`.
+ ErrorHandler* _errorHandler;
+
+ //! Code zone (used to allocate core structures).
+ Zone _zone;
+ //! Zone allocator, used to manage internal containers.
+ ZoneAllocator _allocator;
+
+ //! Attached code emitters.
+ ZoneVector<BaseEmitter*> _emitters;
+ //! Section entries.
+ ZoneVector<Section*> _sections;
+ //! Label entries.
+ ZoneVector<LabelEntry*> _labelEntries;
+ //! Relocation entries.
+ ZoneVector<RelocEntry*> _relocations;
+ //! Label name -> LabelEntry (only named labels).
+ ZoneHash<LabelEntry> _namedLabels;
+
+  //! Count of label links that are not resolved.
+  size_t _unresolvedLinkCount;
+  //! Pointer to the address table section (or `nullptr` if it doesn't exist).
+ Section* _addressTableSection;
+ //! Address table entries.
+ ZoneTree<AddressTableEntry> _addressTableEntries;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates an uninitialized CodeHolder (you must init() it before it can be used).
+ ASMJIT_API CodeHolder() noexcept;
+ //! Destroys the CodeHolder.
+ ASMJIT_API ~CodeHolder() noexcept;
+
+ inline bool isInitialized() const noexcept { return _codeInfo.isInitialized(); }
+
+  //! Initializes CodeHolder to hold code described by `info`.
+  ASMJIT_API Error init(const CodeInfo& info) noexcept;
+  //! Detaches all attached emitters and resets the `CodeHolder`.
+  ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Attach & Detach
+ //! \{
+
+ //! Attaches an emitter to this `CodeHolder`.
+ ASMJIT_API Error attach(BaseEmitter* emitter) noexcept;
+ //! Detaches an emitter from this `CodeHolder`.
+ ASMJIT_API Error detach(BaseEmitter* emitter) noexcept;
+
+ //! \}
+
+ //! \name Allocators
+ //! \{
+
+ inline ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
+
+ //! \}
+
+ //! \name Code Emitter
+ //! \{
+
+ inline const ZoneVector<BaseEmitter*>& emitters() const noexcept { return _emitters; }
+
+ //! Returns global emitter options, internally propagated to all attached emitters.
+ inline uint32_t emitterOptions() const noexcept { return _emitterOptions; }
+
+ //! Enables the given global emitter `options` and propagates the resulting
+ //! options to all attached emitters.
+ ASMJIT_API void addEmitterOptions(uint32_t options) noexcept;
+
+ //! Disables the given global emitter `options` and propagates the resulting
+ //! options to all attached emitters.
+ ASMJIT_API void clearEmitterOptions(uint32_t options) noexcept;
+
+ //! \}
+
+ //! \name Code & Architecture
+ //! \{
+
+ //! Returns the target architecture information, see `ArchInfo`.
+ inline const ArchInfo& archInfo() const noexcept { return _codeInfo.archInfo(); }
+ //! Returns the target code information, see `CodeInfo`.
+ inline const CodeInfo& codeInfo() const noexcept { return _codeInfo; }
+
+ //! Returns the target architecture id.
+ inline uint32_t archId() const noexcept { return archInfo().archId(); }
+ //! Returns the target architecture sub-id.
+ inline uint32_t archSubId() const noexcept { return archInfo().archSubId(); }
+
+ //! Tests whether a static base-address is set.
+ inline bool hasBaseAddress() const noexcept { return _codeInfo.hasBaseAddress(); }
+ //! Returns a static base-address (uint64_t).
+ inline uint64_t baseAddress() const noexcept { return _codeInfo.baseAddress(); }
+
+ //! \}
+
+ //! \name Logging & Error Handling
+ //! \{
+
+ //! Returns the attached logger.
+ inline Logger* logger() const noexcept { return _logger; }
+ //! Attaches a `logger` to CodeHolder and propagates it to all attached emitters.
+ ASMJIT_API void setLogger(Logger* logger) noexcept;
+ //! Resets the logger to none.
+ inline void resetLogger() noexcept { setLogger(nullptr); }
+
+ //! Tests whether the global error handler is attached.
+ inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+ //! Returns the global error handler.
+ inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+ //! Sets the global error handler.
+ inline void setErrorHandler(ErrorHandler* handler) noexcept { _errorHandler = handler; }
+ //! Resets the global error handler to none.
+ inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+ //! \}
+
+ //! \name Code Buffer
+ //! \{
+
+  //! Grows the buffer of `cb` to fit at least `n` more bytes.
+  ASMJIT_API Error growBuffer(CodeBuffer* cb, size_t n) noexcept;
+  //! Reserves the buffer of `cb` to fit at least `n` bytes in total.
+  ASMJIT_API Error reserveBuffer(CodeBuffer* cb, size_t n) noexcept;
+
+ //! \}
+
+ //! \name Sections
+ //! \{
+
+ //! Returns an array of `Section*` records.
+ inline const ZoneVector<Section*>& sections() const noexcept { return _sections; }
+ //! Returns the number of sections.
+ inline uint32_t sectionCount() const noexcept { return _sections.size(); }
+
+ //! Tests whether the given `sectionId` is valid.
+ inline bool isSectionValid(uint32_t sectionId) const noexcept { return sectionId < _sections.size(); }
+
+  //! Creates a new section and returns its pointer in `sectionOut`.
+ //!
+ //! Returns `Error`, does not report a possible error to `ErrorHandler`.
+ ASMJIT_API Error newSection(Section** sectionOut, const char* name, size_t nameSize = SIZE_MAX, uint32_t flags = 0, uint32_t alignment = 1) noexcept;
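+
+  // A sketch of creating a custom data section (name, flags, and alignment
+  // are illustrative; error handling elided):
+  //
+  //   Section* data;
+  //   if (code.newSection(&data, ".data", SIZE_MAX, 0, 8) == kErrorOk) {
+  //     // `data` can now be used as an emission target by attached emitters.
+  //   }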
+
+  //! Returns a section entry of the given `sectionId`.
+ inline Section* sectionById(uint32_t sectionId) const noexcept { return _sections[sectionId]; }
+
+  //! Returns a section that matches the given `name`.
+  //!
+  //! Returns `nullptr` if there is no such section.
+ ASMJIT_API Section* sectionByName(const char* name, size_t nameSize = SIZE_MAX) const noexcept;
+
+ //! Returns '.text' section (section that commonly represents code).
+ //!
+ //! \note Text section is always the first section in `CodeHolder::sections()` array.
+ inline Section* textSection() const noexcept { return _sections[0]; }
+
+ //! Tests whether '.addrtab' section exists.
+ inline bool hasAddressTable() const noexcept { return _addressTableSection != nullptr; }
+
+ //! Returns '.addrtab' section.
+ //!
+ //! This section is used exclusively by AsmJit to store absolute 64-bit
+ //! addresses that cannot be encoded in instructions like 'jmp' or 'call'.
+ inline Section* addressTableSection() const noexcept { return _addressTableSection; }
+
+  //! Ensures that the '.addrtab' section exists (creates it if it doesn't) and
+  //! returns it. Can return `nullptr` on an out-of-memory condition.
+ ASMJIT_API Section* ensureAddressTableSection() noexcept;
+
+  //! Adds an address to the address table.
+  //!
+  //! This implicitly calls `ensureAddressTableSection()` and then creates an
+  //! `AddressTableEntry` that is inserted to `_addressTableEntries`. If the
+  //! address already exists this operation does nothing as equal addresses
+  //! share the same slot.
+  //!
+  //! This function should be considered internal as it's used by assemblers to
+  //! insert an absolute address into the address table. Inserting an address
+  //! into the address table without creating a particular relocation entry
+  //! makes no sense.
+ ASMJIT_API Error addAddressToAddressTable(uint64_t address) noexcept;
+
+ //! \}
+
+ //! \name Labels & Symbols
+ //! \{
+
+ //! Returns array of `LabelEntry*` records.
+ inline const ZoneVector<LabelEntry*>& labelEntries() const noexcept { return _labelEntries; }
+
+ //! Returns number of labels created.
+ inline uint32_t labelCount() const noexcept { return _labelEntries.size(); }
+
+  //! Tests whether the label having `labelId` is valid (i.e. created by `newLabelEntry()`).
+ inline bool isLabelValid(uint32_t labelId) const noexcept {
+ return labelId < _labelEntries.size();
+ }
+
+ //! Tests whether the `label` is valid (i.e. created by `newLabelEntry()`).
+ inline bool isLabelValid(const Label& label) const noexcept {
+ return label.id() < _labelEntries.size();
+ }
+
+  //! Tests whether the label having `labelId` is already bound.
+  //!
+  //! Returns `false` if the `labelId` is not valid.
+ inline bool isLabelBound(uint32_t labelId) const noexcept {
+ return isLabelValid(labelId) && _labelEntries[labelId]->isBound();
+ }
+
+  //! \overload
+ inline bool isLabelBound(const Label& label) const noexcept {
+ return isLabelBound(label.id());
+ }
+
+ //! Returns LabelEntry of the given label `id`.
+ inline LabelEntry* labelEntry(uint32_t labelId) const noexcept {
+ return isLabelValid(labelId) ? _labelEntries[labelId] : static_cast<LabelEntry*>(nullptr);
+ }
+
+ //! Returns LabelEntry of the given `label`.
+ inline LabelEntry* labelEntry(const Label& label) const noexcept {
+ return labelEntry(label.id());
+ }
+
+ //! Returns offset of a `Label` by its `labelId`.
+ //!
+ //! The offset returned is relative to the start of the section. Zero offset
+ //! is returned for unbound labels, which is their initial offset value.
+ inline uint64_t labelOffset(uint32_t labelId) const noexcept {
+ ASMJIT_ASSERT(isLabelValid(labelId));
+ return _labelEntries[labelId]->offset();
+ }
+
+ //! \overload
+ inline uint64_t labelOffset(const Label& label) const noexcept {
+ return labelOffset(label.id());
+ }
+
+  //! Returns the offset of a label by its `labelId` relative to the base offset.
+ //!
+ //! \remarks The offset of the section where the label is bound must be valid
+ //! in order to use this function, otherwise the value returned will not be
+ //! reliable.
+ inline uint64_t labelOffsetFromBase(uint32_t labelId) const noexcept {
+ ASMJIT_ASSERT(isLabelValid(labelId));
+ const LabelEntry* le = _labelEntries[labelId];
+ return (le->isBound() ? le->section()->offset() : uint64_t(0)) + le->offset();
+ }
+
+ //! \overload
+ inline uint64_t labelOffsetFromBase(const Label& label) const noexcept {
+ return labelOffsetFromBase(label.id());
+ }
+
+  //! Creates a new anonymous label entry and returns it in `entryOut`.
+ //!
+ //! Returns `Error`, does not report error to `ErrorHandler`.
+ ASMJIT_API Error newLabelEntry(LabelEntry** entryOut) noexcept;
+
+  //! Creates a new named label entry of the given label-type `type` and returns it in `entryOut`.
+ //!
+ //! Returns `Error`, does not report a possible error to `ErrorHandler`.
+ ASMJIT_API Error newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ //! Returns a label id by name.
+ ASMJIT_API uint32_t labelIdByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
+
+ inline Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept {
+ return Label(labelIdByName(name, nameSize, parentId));
+ }
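+
+  // A sketch of working with named labels (assuming `a` is an emitter that
+  // created the label via its `newNamedLabel()`; purely illustrative):
+  //
+  //   Label entry = a.newNamedLabel("entry");
+  //   // ... later, the label can be looked up by name through the holder:
+  //   Label found = code.labelByName("entry");
+  //   bool ok = code.isLabelValid(found);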
+
+ //! Tests whether there are any unresolved label links.
+ inline bool hasUnresolvedLinks() const noexcept { return _unresolvedLinkCount != 0; }
+ //! Returns the number of label links, which are unresolved.
+ inline size_t unresolvedLinkCount() const noexcept { return _unresolvedLinkCount; }
+
+  //! Creates a new label-link used to store information about a yet unbound label.
+  //!
+  //! Returns `nullptr` if the allocation failed.
+ ASMJIT_API LabelLink* newLabelLink(LabelEntry* le, uint32_t sectionId, size_t offset, intptr_t rel) noexcept;
+
+  //! Resolves cross-section links (`LabelLink`) associated with each label that
+  //! was used as a destination in code of a different section. It's only useful
+  //! when multiple sections are used, as it does nothing if the code contains
+  //! only a single section, in which cross-section links are not possible.
+ ASMJIT_API Error resolveUnresolvedLinks() noexcept;
+
+ //! Binds a label to a given `sectionId` and `offset` (relative to start of the section).
+ //!
+ //! This function is generally used by `BaseAssembler::bind()` to do the heavy lifting.
+ ASMJIT_API Error bindLabel(const Label& label, uint32_t sectionId, uint64_t offset) noexcept;
+
+ //! \}
+
+ //! \name Relocations
+ //! \{
+
+ //! Tests whether the code contains relocation entries.
+ inline bool hasRelocEntries() const noexcept { return !_relocations.empty(); }
+ //! Returns array of `RelocEntry*` records.
+ inline const ZoneVector<RelocEntry*>& relocEntries() const noexcept { return _relocations; }
+
+ //! Returns a RelocEntry of the given `id`.
+ inline RelocEntry* relocEntry(uint32_t id) const noexcept { return _relocations[id]; }
+
+ //! Creates a new relocation entry of type `relocType` and size `valueSize`.
+ //!
+ //! Additional fields can be set after the relocation entry was created.
+ ASMJIT_API Error newRelocEntry(RelocEntry** dst, uint32_t relocType, uint32_t valueSize) noexcept;
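+
+  // A sketch of creating a relocation record manually (normally emitters do
+  // this; `srcOffset` and `absValue` are placeholders):
+  //
+  //   RelocEntry* re;
+  //   if (code.newRelocEntry(&re, RelocEntry::kTypeAbsToAbs, 8) == kErrorOk) {
+  //     re->_sourceSectionId = code.textSection()->id();
+  //     re->_sourceOffset = srcOffset;  // Where the value to patch lives.
+  //     re->_payload = absValue;        // Value resolved by relocateToBase().
+  //   }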
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Flattens all sections by recalculating their offsets, starting at 0.
+ //!
+ //! \note This should never be called more than once.
+ ASMJIT_API Error flatten() noexcept;
+
+  //! Returns the computed size of code & data of all sections.
+  //!
+  //! \note All sections will be iterated over and the code size returned
+  //! represents the minimum size of all combined sections after applying
+  //! the minimum alignment. Code size may decrease after calling
+  //! `flatten()` and `relocateToBase()`.
+ ASMJIT_API size_t codeSize() const noexcept;
+
+ //! Relocates the code to the given `baseAddress`.
+ //!
+ //! \param baseAddress Absolute base address where the code will be relocated
+ //! to. Please note that nothing is copied to such base address, it's just an
+ //! absolute value used by the relocator to resolve all stored relocations.
+ //!
+ //! \note This should never be called more than once.
+ ASMJIT_API Error relocateToBase(uint64_t baseAddress) noexcept;
+
+ //! Options that can be used with \ref copySectionData().
+ enum CopyOptions : uint32_t {
+    //! If the virtual size of a section is larger than the size of its buffer
+    //! then all bytes between the buffer size and the virtual size will be zeroed.
+ kCopyWithPadding = 0x1
+ };
+
+ //! Copies a single section into `dst`.
+ ASMJIT_API Error copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t options = 0) noexcept;
+
+ //! Copies all sections into `dst`.
+ //!
+ //! This should only be used if the data was flattened and there are no gaps
+ //! between the sections. The `dstSize` is always checked and the copy will
+ //! never write anything outside the provided buffer.
+ ASMJIT_API Error copyFlattenedData(void* dst, size_t dstSize, uint32_t options = 0) noexcept;
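+
+  // A typical "assemble to memory" flow built from the utilities above
+  // (a sketch; `buffer` allocation and error checks are elided):
+  //
+  //   code.flatten();                         // Assign final section offsets.
+  //   code.resolveUnresolvedLinks();          // Fix cross-section label links.
+  //   size_t size = code.codeSize();          // Minimum destination size.
+  //   code.relocateToBase(uint64_t(buffer));  // Resolve stored relocations.
+  //   code.copyFlattenedData(buffer, size, CodeHolder::kCopyWithPadding);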
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CODEHOLDER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/compiler.cpp b/3rdparty/asmjit/src/asmjit/core/compiler.cpp
new file mode 100644
index 00000000000..13dbf54300a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/compiler.cpp
@@ -0,0 +1,669 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/compiler.h"
+#include "../core/cpuinfo.h"
+#include "../core/logging.h"
+#include "../core/rapass_p.h"
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::GlobalConstPoolPass]
+// ============================================================================
+
+class GlobalConstPoolPass : public Pass {
+ ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
+ typedef Pass Base;
+
+ GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
+
+ Error run(Zone* zone, Logger* logger) noexcept override {
+ DebugUtils::unused(zone, logger);
+
+ // Flush the global constant pool.
+ BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
+ if (compiler->_globalConstPool) {
+ compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
+ compiler->_globalConstPool = nullptr;
+ }
+ return kErrorOk;
+ }
+};
+
+// ============================================================================
+// [asmjit::FuncCallNode - Arg / Ret]
+// ============================================================================
+
+bool FuncCallNode::_setArg(uint32_t i, const Operand_& op) noexcept {
+ if ((i & ~kFuncArgHi) >= _funcDetail.argCount())
+ return false;
+
+ _args[i] = op;
+ return true;
+}
+
+bool FuncCallNode::_setRet(uint32_t i, const Operand_& op) noexcept {
+ if (i >= 2)
+ return false;
+
+ _rets[i] = op;
+ return true;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Construction / Destruction]
+// ============================================================================
+
+BaseCompiler::BaseCompiler() noexcept
+ : BaseBuilder(),
+ _func(nullptr),
+ _vRegZone(4096 - Zone::kBlockOverhead),
+ _vRegArray(),
+ _localConstPool(nullptr),
+ _globalConstPool(nullptr) {
+
+ _type = kTypeCompiler;
+}
+
+BaseCompiler::~BaseCompiler() noexcept {}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Function API]
+// ============================================================================
+
+FuncNode* BaseCompiler::newFunc(const FuncSignature& sign) noexcept {
+ Error err;
+
+ FuncNode* func = newNodeT<FuncNode>();
+ if (ASMJIT_UNLIKELY(!func)) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ err = registerLabelNode(func);
+ if (ASMJIT_UNLIKELY(err)) {
+ // TODO: Calls reportError, maybe rethink noexcept?
+ reportError(err);
+ return nullptr;
+ }
+
+ // Create helper nodes.
+ func->_exitNode = newLabelNode();
+ func->_end = newNodeT<SentinelNode>(SentinelNode::kSentinelFuncEnd);
+
+ if (ASMJIT_UNLIKELY(!func->_exitNode || !func->_end)) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ // Initialize the function info.
+ err = func->detail().init(sign);
+ if (ASMJIT_UNLIKELY(err)) {
+ reportError(err);
+ return nullptr;
+ }
+
+  // If the target guarantees greater stack alignment than required by the
+  // calling convention, override it, as this prevents having to perform
+  // dynamic stack alignment.
+ if (func->_funcDetail._callConv.naturalStackAlignment() < _codeInfo.stackAlignment())
+ func->_funcDetail._callConv.setNaturalStackAlignment(_codeInfo.stackAlignment());
+
+ // Initialize the function frame.
+ err = func->_frame.init(func->_funcDetail);
+ if (ASMJIT_UNLIKELY(err)) {
+ reportError(err);
+ return nullptr;
+ }
+
+ // Allocate space for function arguments.
+ func->_args = nullptr;
+ if (func->argCount() != 0) {
+ func->_args = _allocator.allocT<VirtReg*>(func->argCount() * sizeof(VirtReg*));
+ if (ASMJIT_UNLIKELY(!func->_args)) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ memset(func->_args, 0, func->argCount() * sizeof(VirtReg*));
+ }
+
+ return func;
+}
+
+FuncNode* BaseCompiler::addFunc(FuncNode* func) {
+ ASMJIT_ASSERT(_func == nullptr);
+ _func = func;
+
+ addNode(func); // Function node.
+ BaseNode* prev = cursor(); // {CURSOR}.
+ addNode(func->exitNode()); // Function exit label.
+ addNode(func->endNode()); // Function end marker.
+
+ _setCursor(prev);
+ return func;
+}
+
+FuncNode* BaseCompiler::addFunc(const FuncSignature& sign) {
+ FuncNode* func = newFunc(sign);
+
+ if (!func) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ return addFunc(func);
+}
+
+Error BaseCompiler::endFunc() {
+ FuncNode* func = _func;
+ if (ASMJIT_UNLIKELY(!func))
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+
+  // Add the local constant pool at the end of the function (if it exists).
+ if (_localConstPool) {
+ setCursor(func->endNode()->prev());
+ addNode(_localConstPool);
+ _localConstPool = nullptr;
+ }
+
+ // Mark as finished.
+ _func = nullptr;
+
+ SentinelNode* end = func->endNode();
+ setCursor(end);
+ return kErrorOk;
+}
+
+Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) {
+ FuncNode* func = _func;
+
+ if (ASMJIT_UNLIKELY(!func))
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+
+ if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
+ return reportError(DebugUtils::errored(kErrorInvalidVirtId));
+
+ VirtReg* vReg = virtRegByReg(r);
+ func->setArg(argIndex, vReg);
+
+ return kErrorOk;
+}
+
+FuncRetNode* BaseCompiler::newRet(const Operand_& o0, const Operand_& o1) noexcept {
+ FuncRetNode* node = newNodeT<FuncRetNode>();
+ if (!node) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ node->setOp(0, o0);
+ node->setOp(1, o1);
+ node->setOpCount(!o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u);
+
+ return node;
+}
+
+FuncRetNode* BaseCompiler::addRet(const Operand_& o0, const Operand_& o1) noexcept {
+ FuncRetNode* node = newRet(o0, o1);
+ if (!node) return nullptr;
+ return addNode(node)->as<FuncRetNode>();
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Call]
+// ============================================================================
+
+FuncCallNode* BaseCompiler::newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
+ FuncCallNode* node = newNodeT<FuncCallNode>(instId, 0u);
+ if (ASMJIT_UNLIKELY(!node)) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ node->setOpCount(1);
+ node->setOp(0, o0);
+ node->resetOp(1);
+ node->resetOp(2);
+ node->resetOp(3);
+
+ Error err = node->detail().init(sign);
+ if (ASMJIT_UNLIKELY(err)) {
+ reportError(err);
+ return nullptr;
+ }
+
+  // If there are no arguments, skip the allocation.
+ uint32_t nArgs = sign.argCount();
+ if (!nArgs) return node;
+
+ node->_args = static_cast<Operand*>(_allocator.alloc(nArgs * sizeof(Operand)));
+ if (!node->_args) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ memset(node->_args, 0, nArgs * sizeof(Operand));
+ return node;
+}
+
+FuncCallNode* BaseCompiler::addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept {
+ FuncCallNode* node = newCall(instId, o0, sign);
+ if (!node) return nullptr;
+ return addNode(node)->as<FuncCallNode>();
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Vars]
+// ============================================================================
+
+static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
+ uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
+
+ char buf[64];
+ int size = snprintf(buf, ASMJIT_ARRAY_SIZE(buf), "%%%u", unsigned(index));
+
+ ASMJIT_ASSERT(size > 0 && size < int(ASMJIT_ARRAY_SIZE(buf)));
+ vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
+}
+
+VirtReg* BaseCompiler::newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept {
+ uint32_t index = _vRegArray.size();
+ if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount)))
+ return nullptr;
+
+ if (_vRegArray.willGrow(&_allocator) != kErrorOk)
+ return nullptr;
+
+ VirtReg* vReg = _vRegZone.allocZeroedT<VirtReg>();
+ if (ASMJIT_UNLIKELY(!vReg)) return nullptr;
+
+ uint32_t size = Type::sizeOf(typeId);
+ uint32_t alignment = Support::min<uint32_t>(size, 64);
+
+ vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (name && name[0] != '\0')
+ vReg->_name.setData(&_dataZone, name, SIZE_MAX);
+ else
+ BaseCompiler_assignGenericName(this, vReg);
+#else
+ DebugUtils::unused(name);
+#endif
+
+ _vRegArray.appendUnsafe(vReg);
+ return vReg;
+}
+
+Error BaseCompiler::_newReg(BaseReg& out, uint32_t typeId, const char* name) {
+ RegInfo regInfo;
+
+ Error err = ArchUtils::typeIdToRegInfo(archId(), typeId, regInfo);
+ if (ASMJIT_UNLIKELY(err)) return reportError(err);
+
+ VirtReg* vReg = newVirtReg(typeId, regInfo.signature(), name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ out._initReg(regInfo.signature(), vReg->id());
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newRegFmt(BaseReg& out, uint32_t typeId, const char* fmt, ...) {
+ va_list ap;
+ StringTmp<256> sb;
+
+ va_start(ap, fmt);
+ sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ return _newReg(out, typeId, sb.data());
+}
+
+Error BaseCompiler::_newReg(BaseReg& out, const BaseReg& ref, const char* name) {
+ RegInfo regInfo;
+ uint32_t typeId;
+
+ if (isVirtRegValid(ref)) {
+ VirtReg* vRef = virtRegByReg(ref);
+ typeId = vRef->typeId();
+
+    // NOTE: It's possible to cast one register type to another if it's the
+    // same register group. However, VirtReg always contains the TypeId that
+    // was used to create the register. This means that in some cases we may
+    // end up with different sizes of `ref` and `vRef`. In such a case we
+    // adjust the TypeId to match the `ref` register type instead of the
+    // original register type, which should be the expected behavior.
+ uint32_t typeSize = Type::sizeOf(typeId);
+ uint32_t refSize = ref.size();
+
+ if (typeSize != refSize) {
+ if (Type::isInt(typeId)) {
+ // GP register - change TypeId to match `ref`, but keep sign of `vRef`.
+ switch (refSize) {
+ case 1: typeId = Type::kIdI8 | (typeId & 1); break;
+ case 2: typeId = Type::kIdI16 | (typeId & 1); break;
+ case 4: typeId = Type::kIdI32 | (typeId & 1); break;
+ case 8: typeId = Type::kIdI64 | (typeId & 1); break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+ else if (Type::isMmx(typeId)) {
+ // MMX register - always use 64-bit.
+ typeId = Type::kIdMmx64;
+ }
+ else if (Type::isMask(typeId)) {
+ // Mask register - change TypeId to match `ref` size.
+ switch (refSize) {
+ case 1: typeId = Type::kIdMask8; break;
+ case 2: typeId = Type::kIdMask16; break;
+ case 4: typeId = Type::kIdMask32; break;
+ case 8: typeId = Type::kIdMask64; break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+ else {
+ // VEC register - change TypeId to match `ref` size, keep vector metadata.
+ uint32_t elementTypeId = Type::baseOf(typeId);
+
+ switch (refSize) {
+ case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
+ case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
+ case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
+ default: typeId = Type::kIdVoid; break;
+ }
+ }
+
+ if (typeId == Type::kIdVoid)
+ return reportError(DebugUtils::errored(kErrorInvalidState));
+ }
+ }
+ else {
+ typeId = ref.type();
+ }
+
+ Error err = ArchUtils::typeIdToRegInfo(archId(), typeId, regInfo);
+ if (ASMJIT_UNLIKELY(err)) return reportError(err);
+
+ VirtReg* vReg = newVirtReg(typeId, regInfo.signature(), name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ out._initReg(regInfo.signature(), vReg->id());
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newRegFmt(BaseReg& out, const BaseReg& ref, const char* fmt, ...) {
+ va_list ap;
+ StringTmp<256> sb;
+
+ va_start(ap, fmt);
+ sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ return _newReg(out, ref, sb.data());
+}
+
+Error BaseCompiler::_newStack(BaseMem& out, uint32_t size, uint32_t alignment, const char* name) {
+ if (size == 0)
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment == 0)
+ alignment = 1;
+
+ if (!Support::isPowerOf2(alignment))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment > 64)
+ alignment = 64;
+
+ VirtReg* vReg = newVirtReg(0, 0, name);
+ if (ASMJIT_UNLIKELY(!vReg)) {
+ out.reset();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ vReg->_virtSize = size;
+ vReg->_isStack = true;
+ vReg->_alignment = uint8_t(alignment);
+
+ // Set the memory operand to GPD/GPQ and its id to VirtReg.
+ out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
+ return kErrorOk;
+}
+
+Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) noexcept {
+ if (!isVirtIdValid(virtId))
+ return DebugUtils::errored(kErrorInvalidVirtId);
+
+ if (newAlignment && !Support::isPowerOf2(newAlignment))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (newAlignment > 64)
+ newAlignment = 64;
+
+ VirtReg* vReg = virtRegById(virtId);
+ if (newSize)
+ vReg->_virtSize = newSize;
+
+ if (newAlignment)
+ vReg->_alignment = uint8_t(newAlignment);
+
+  // This is required if the RAPass is already running. There is a chance that
+  // a stack-slot has already been allocated, and in that case it has to be
+  // updated as well, otherwise we would allocate the wrong amount of memory.
+ RAWorkReg* workReg = vReg->_workReg;
+ if (workReg && workReg->_stackSlot) {
+ workReg->_stackSlot->_size = vReg->_virtSize;
+ workReg->_stackSlot->_alignment = vReg->_alignment;
+ }
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::_newConst(BaseMem& out, uint32_t scope, const void* data, size_t size) {
+ ConstPoolNode** pPool;
+ if (scope == ConstPool::kScopeLocal)
+ pPool = &_localConstPool;
+ else if (scope == ConstPool::kScopeGlobal)
+ pPool = &_globalConstPool;
+ else
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ ConstPoolNode* pool = *pPool;
+ if (!pool) {
+ pool = newConstPoolNode();
+ if (ASMJIT_UNLIKELY(!pool))
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ *pPool = pool;
+ }
+
+ size_t off;
+ Error err = pool->add(data, size, off);
+
+ if (ASMJIT_UNLIKELY(err))
+ return reportError(err);
+
+ out = BaseMem(BaseMem::Decomposed {
+ Label::kLabelTag, // Base type.
+ pool->id(), // Base id.
+ 0, // Index type.
+ 0, // Index id.
+ int32_t(off), // Offset.
+ uint32_t(size), // Size.
+ 0 // Flags.
+ });
+ return kErrorOk;
+}
+
+void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
+ if (!reg.isVirtReg()) return;
+
+ VirtReg* vReg = virtRegById(reg.id());
+ if (!vReg) return;
+
+ if (fmt && fmt[0] != '\0') {
+ char buf[128];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+ va_end(ap);
+
+ vReg->_name.setData(&_dataZone, buf, SIZE_MAX);
+ }
+ else {
+ BaseCompiler_assignGenericName(this, vReg);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Jump Annotations]
+// ============================================================================
+
+JumpNode* BaseCompiler::newJumpNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) noexcept {
+ uint32_t opCount = 1;
+ JumpNode* node = _allocator.allocT<JumpNode>();
+ if (ASMJIT_UNLIKELY(!node))
+ return nullptr;
+
+ node = new(node) JumpNode(this, instId, instOptions, opCount, annotation);
+ node->setOp(0, o0);
+ node->resetOps(opCount, JumpNode::kBaseOpCapacity);
+ return node;
+}
+
+Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
+ uint32_t options = instOptions() | globalInstOptions();
+ const char* comment = inlineComment();
+
+ JumpNode* node = newJumpNode(instId, options, o0, annotation);
+
+ resetInstOptions();
+ resetInlineComment();
+
+ if (ASMJIT_UNLIKELY(!node)) {
+ resetExtraReg();
+ return reportError(DebugUtils::errored(kErrorOutOfMemory));
+ }
+
+ node->setExtraReg(extraReg());
+ if (comment)
+ node->setInlineComment(static_cast<char*>(_dataZone.dup(comment, strlen(comment), true)));
+
+ addNode(node);
+ resetExtraReg();
+ return kErrorOk;
+}
+
+JumpAnnotation* BaseCompiler::newJumpAnnotation() {
+ if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ uint32_t id = _jumpAnnotations.size();
+ JumpAnnotation* jumpAnnotation = _allocator.newT<JumpAnnotation>(this, id);
+
+ if (!jumpAnnotation) {
+ reportError(DebugUtils::errored(kErrorOutOfMemory));
+ return nullptr;
+ }
+
+ _jumpAnnotations.appendUnsafe(jumpAnnotation);
+ return jumpAnnotation;
+}
+
+// ============================================================================
+// [asmjit::BaseCompiler - Events]
+// ============================================================================
+
+Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ Error err = addPassT<GlobalConstPoolPass>();
+ if (ASMJIT_UNLIKELY(err)) {
+ onDetach(code);
+ return err;
+ }
+
+ return kErrorOk;
+}
+
+Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
+ _func = nullptr;
+ _localConstPool = nullptr;
+ _globalConstPool = nullptr;
+
+ _vRegArray.reset();
+ _vRegZone.reset();
+
+ return Base::onDetach(code);
+}
+
+// ============================================================================
+// [asmjit::FuncPass - Construction / Destruction]
+// ============================================================================
+
+FuncPass::FuncPass(const char* name) noexcept
+ : Pass(name) {}
+
+// ============================================================================
+// [asmjit::FuncPass - Run]
+// ============================================================================
+
+Error FuncPass::run(Zone* zone, Logger* logger) noexcept {
+ BaseNode* node = cb()->firstNode();
+ if (!node) return kErrorOk;
+
+ do {
+ if (node->type() == BaseNode::kNodeFunc) {
+ FuncNode* func = node->as<FuncNode>();
+ node = func->endNode();
+ ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
+ }
+
+ // Find a function by skipping all nodes that are not `kNodeFunc`.
+ do {
+ node = node->next();
+ } while (node && node->type() != BaseNode::kNodeFunc);
+ } while (node);
+
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/core/compiler.h b/3rdparty/asmjit/src/asmjit/core/compiler.h
new file mode 100644
index 00000000000..32b2a8bb271
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/compiler.h
@@ -0,0 +1,674 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
+#define ASMJIT_CORE_COMPILER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/assembler.h"
+#include "../core/builder.h"
+#include "../core/constpool.h"
+#include "../core/func.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+struct RATiedReg;
+class RAWorkReg;
+
+class JumpAnnotation;
+
+class JumpNode;
+class FuncNode;
+class FuncRetNode;
+class FuncCallNode;
+
+//! \addtogroup asmjit_compiler
+//! \{
+
+// ============================================================================
+// [asmjit::VirtReg]
+// ============================================================================
+
+//! Virtual register data (BaseCompiler).
+class VirtReg {
+public:
+ ASMJIT_NONCOPYABLE(VirtReg)
+
+ //! Virtual register id.
+ uint32_t _id;
+ //! Virtual register info (signature).
+ RegInfo _info;
+  //! Virtual register size (can be smaller than `_info.size()`).
+ uint32_t _virtSize;
+ //! Virtual register alignment (for spilling).
+ uint8_t _alignment;
+ //! Type-id.
+ uint8_t _typeId;
+ //! Virtual register weight for alloc/spill decisions.
+ uint8_t _weight;
+ //! True if this is a fixed register, never reallocated.
+ uint8_t _isFixed : 1;
+ //! True if the virtual register is only used as a stack (never accessed as register).
+ uint8_t _isStack : 1;
+ uint8_t _reserved : 6;
+
+ //! Virtual register name (user provided or automatically generated).
+ ZoneString<16> _name;
+
+ // -------------------------------------------------------------------------
+ // The following members are used exclusively by RAPass. They are initialized
+ // when the VirtReg is created to NULL pointers and then changed during RAPass
+ // execution. RAPass sets them back to NULL before it returns.
+ // -------------------------------------------------------------------------
+
+ //! Reference to `RAWorkReg`, used during register allocation.
+ RAWorkReg* _workReg;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
+ : _id(id),
+ _virtSize(virtSize),
+ _alignment(uint8_t(alignment)),
+ _typeId(uint8_t(typeId)),
+ _weight(1),
+ _isFixed(false),
+ _isStack(false),
+ _reserved(0),
+ _name(),
+ _workReg(nullptr) { _info._signature = signature; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the virtual register id.
+ inline uint32_t id() const noexcept { return _id; }
+
+ //! Returns the virtual register name.
+ inline const char* name() const noexcept { return _name.data(); }
+ //! Returns the size of the virtual register name.
+ inline uint32_t nameSize() const noexcept { return _name.size(); }
+
+  //! Returns the register information that wraps the register signature.
+ inline const RegInfo& info() const noexcept { return _info; }
+ //! Returns a virtual register type (maps to the physical register type as well).
+ inline uint32_t type() const noexcept { return _info.type(); }
+ //! Returns a virtual register group (maps to the physical register group as well).
+ inline uint32_t group() const noexcept { return _info.group(); }
+
+  //! Returns the real size of the register this virtual register maps to.
+  //!
+  //! For example, if this is a 128-bit SIMD register used for a scalar single
+  //! precision floating point value, then its virtSize would be 4; however,
+  //! `regSize` would still say 16 (128 bits), because that's the smallest size
+  //! of that register type.
+  inline uint32_t regSize() const noexcept { return _info.size(); }
+
+ //! Returns a register signature of this virtual register.
+ inline uint32_t signature() const noexcept { return _info.signature(); }
+
+ //! Returns the virtual register size.
+ //!
+ //! The virtual register size describes how many bytes the virtual register
+ //! needs to store its content. It can be smaller than the physical register
+ //! size, see `regSize()`.
+ inline uint32_t virtSize() const noexcept { return _virtSize; }
+
+ //! Returns the virtual register alignment.
+ inline uint32_t alignment() const noexcept { return _alignment; }
+
+ //! Returns the virtual register type id, see `Type::Id`.
+ inline uint32_t typeId() const noexcept { return _typeId; }
+
+ //! Returns the virtual register weight - the register allocator can use it
+ //! as explicit hint for alloc/spill decisions.
+ inline uint32_t weight() const noexcept { return _weight; }
+ //! Sets the virtual register weight (0 to 255) - the register allocator can
+ //! use it as explicit hint for alloc/spill decisions and initial bin-packing.
+ inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
+
+ //! Returns whether the virtual register is always allocated to a fixed
+ //! physical register (and never reallocated).
+ //!
+ //! \note This is only used for special purposes and it's mostly internal.
+ inline bool isFixed() const noexcept { return bool(_isFixed); }
+
+  //! Returns whether the virtual register is in fact a stack area that only
+  //! uses the virtual register id to make it accessible.
+ //!
+ //! \note It's an error if a stack is accessed as a register.
+ inline bool isStack() const noexcept { return bool(_isStack); }
+
+ inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
+ inline RAWorkReg* workReg() const noexcept { return _workReg; }
+ inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
+ inline void resetWorkReg() noexcept { _workReg = nullptr; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseCompiler]
+// ============================================================================
+
+//! Code emitter that uses virtual registers and performs register allocation.
+//!
+//! Compiler is a high-level code-generation tool that provides register
+//! allocation and automatic handling of function calling conventions. It was
+//! primarily designed for merging multiple parts of code into a function
+//! without worrying about registers and function calling conventions.
+//!
+//! BaseCompiler can be used, with minimal effort, to handle 32-bit and 64-bit
+//! code at the same time.
+//!
+//! BaseCompiler is based on BaseBuilder and contains all the features it
+//! provides. This means that the code it stores can be modified (removed,
+//! added, injected) and analyzed. When the code is finalized the compiler
+//! can emit it into an Assembler to translate the abstract representation
+//! into machine code.
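+//!
+//! A minimal end-to-end sketch (assumes the x86 backend; error handling
+//! elided):
+//!
+//!   x86::Compiler cc(&code);            // `code` is an initialized CodeHolder.
+//!   cc.addFunc(FuncSignatureT<int>());  // Begin `int fn(void)`.
+//!
+//!   x86::Gp v = cc.newInt32("v");       // Virtual register, allocated later.
+//!   cc.mov(v, 42);
+//!   cc.ret(v);
+//!
+//!   cc.endFunc();
+//!   cc.finalize();                      // Runs passes and serializes the code.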
+class ASMJIT_VIRTAPI BaseCompiler : public BaseBuilder {
+public:
+ ASMJIT_NONCOPYABLE(BaseCompiler)
+ typedef BaseBuilder Base;
+
+ //! Current function.
+ FuncNode* _func;
+ //! Allocates `VirtReg` objects.
+ Zone _vRegZone;
+ //! Stores array of `VirtReg` pointers.
+ ZoneVector<VirtReg*> _vRegArray;
+ //! Stores jump annotations.
+ ZoneVector<JumpAnnotation*> _jumpAnnotations;
+
+ //! Local constant pool, flushed at the end of each function.
+ ConstPoolNode* _localConstPool;
+ //! Global constant pool, flushed by `finalize()`.
+ ConstPoolNode* _globalConstPool;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `BaseCompiler` instance.
+ ASMJIT_API BaseCompiler() noexcept;
+ //! Destroys the `BaseCompiler` instance.
+ ASMJIT_API virtual ~BaseCompiler() noexcept;
+
+ //! \}
+
+ //! \name Function API
+ //! \{
+
+ //! Returns the current function.
+ inline FuncNode* func() const noexcept { return _func; }
+
+ //! Creates a new `FuncNode`.
+ ASMJIT_API FuncNode* newFunc(const FuncSignature& sign) noexcept;
+ //! Adds a function `node` to the stream.
+ ASMJIT_API FuncNode* addFunc(FuncNode* func);
+ //! Adds a new function.
+ ASMJIT_API FuncNode* addFunc(const FuncSignature& sign);
+ //! Emits a sentinel that marks the end of the current function.
+ ASMJIT_API Error endFunc();
+
+ //! Sets a function argument at `argIndex` to `reg`.
+ ASMJIT_API Error setArg(uint32_t argIndex, const BaseReg& reg);
+
+ //! Creates a new `FuncRetNode`.
+ ASMJIT_API FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) noexcept;
+ //! Adds a new `FuncRetNode`.
+ ASMJIT_API FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) noexcept;
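+
+  // A sketch of binding a virtual register to a function argument (assumes
+  // an architecture-specific compiler `cc` and a one-argument signature):
+  //
+  //   FuncNode* func = cc.addFunc(FuncSignatureT<int, int>());
+  //   x86::Gp x = cc.newInt32("x");
+  //   cc.setArg(0, x);  // `x` now represents the first argument.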
+
+ //! \}
+
+ //! \name Function Calls
+ //! \{
+
+ //! Creates a new `FuncCallNode`.
+ ASMJIT_API FuncCallNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
+ //! Adds a new `FuncCallNode`.
+ ASMJIT_API FuncCallNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& sign) noexcept;
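+
+  // A sketch of emitting a function call through the Compiler (assumes the
+  // x86 backend; `fnPtr`, `argReg`, and `retReg` are placeholders):
+  //
+  //   FuncCallNode* call = cc.call(imm(fnPtr), FuncSignatureT<int, int>());
+  //   call->setArg(0, argReg);  // Pass argument 0.
+  //   call->setRet(0, retReg);  // Receive the return value.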
+
+ //! \}
+
+ //! \name Virtual Registers
+ //! \{
+
+ //! Creates a new virtual register representing the given `typeId` and `signature`.
+ ASMJIT_API VirtReg* newVirtReg(uint32_t typeId, uint32_t signature, const char* name) noexcept;
+
+ ASMJIT_API Error _newReg(BaseReg& out, uint32_t typeId, const char* name = nullptr);
+ ASMJIT_API Error _newRegFmt(BaseReg& out, uint32_t typeId, const char* fmt, ...);
+
+ ASMJIT_API Error _newReg(BaseReg& out, const BaseReg& ref, const char* name = nullptr);
+ ASMJIT_API Error _newRegFmt(BaseReg& out, const BaseReg& ref, const char* fmt, ...);
+
+ //! Tests whether the given `id` is a valid virtual register id.
+ inline bool isVirtIdValid(uint32_t id) const noexcept {
+ uint32_t index = Operand::virtIdToIndex(id);
+ return index < _vRegArray.size();
+ }
+ //! Tests whether the given `reg` is a virtual register having a valid id.
+ inline bool isVirtRegValid(const BaseReg& reg) const noexcept {
+ return isVirtIdValid(reg.id());
+ }
+
+ //! Returns `VirtReg` associated with the given `id`.
+ inline VirtReg* virtRegById(uint32_t id) const noexcept {
+ ASMJIT_ASSERT(isVirtIdValid(id));
+ return _vRegArray[Operand::virtIdToIndex(id)];
+ }
+ //! Returns `VirtReg` associated with the given `reg`.
+ inline VirtReg* virtRegByReg(const BaseReg& reg) const noexcept { return virtRegById(reg.id()); }
+ //! Returns `VirtReg` associated with the given `index`.
+ inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
+
+ //! Returns an array of all virtual registers managed by the Compiler.
+ inline const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
+
+  //! \}
+
+  //! \name Stack
+ //! \{
+
+ ASMJIT_API Error _newStack(BaseMem& out, uint32_t size, uint32_t alignment, const char* name = nullptr);
+
+ //! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
+ ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0) noexcept;
+
+ //! Updates the stack size of a stack created by `_newStack()`.
+ inline Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) noexcept {
+ return setStackSize(mem.id(), newSize, newAlignment);
+ }
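+
+  // A sketch of allocating scratch stack memory (assumes an
+  // architecture-specific wrapper such as `newStack()` in the x86 backend,
+  // which forwards to `_newStack()`):
+  //
+  //   x86::Mem slot = cc.newStack(64, 16, "tmp");  // 64 bytes, 16-byte aligned.
+  //   cc.setStackSize(slot, 128);                  // May be grown later.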
+
+ //! \}
+
+ //! \name Constants
+ //! \{
+
+ ASMJIT_API Error _newConst(BaseMem& out, uint32_t scope, const void* data, size_t size);
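+
+  // A sketch of embedding a constant via the pool (assumes the x86 backend
+  // wrapper `newConst()`, which forwards to `_newConst()`):
+  //
+  //   uint32_t k = 0x3F800000u;  // Bit pattern of 1.0f.
+  //   x86::Mem c = cc.newConst(ConstPool::kScopeLocal, &k, sizeof(k));
+  //   // `c` can now be used as a memory operand, e.g. `cc.movss(reg, c)`.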
+
+ //! \}
+
+ //! \name Miscellaneous
+ //! \{
+
+  //! Renames the given virtual register `reg` to a formatted string `fmt`.
+  //!
+  //! \note Only the new name will appear in the logger.
+ ASMJIT_API void rename(const BaseReg& reg, const char* fmt, ...);
+
+ //! \}
+
+ //! \name Jump Annotations
+ //! \{
+
+ inline const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
+ return _jumpAnnotations;
+ }
+
+ ASMJIT_API JumpNode* newJumpNode(uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) noexcept;
+ ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
+
+ //! Returns a new `JumpAnnotation` instance, which can be used to aggregate
+ //! possible targets of a jump where the target is not a label, for example
+ //! to implement jump tables.
+ ASMJIT_API JumpAnnotation* newJumpAnnotation();
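+
+  // A sketch of annotating an indirect (jump-table style) jump so the
+  // register allocator knows all possible targets (x86 backend assumed;
+  // `target`, `caseA`, and `caseB` are placeholders):
+  //
+  //   JumpAnnotation* ann = cc.newJumpAnnotation();
+  //   ann->addLabel(caseA);
+  //   ann->addLabel(caseB);
+  //   cc.emitAnnotatedJump(x86::Inst::kIdJmp, target, ann);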
+
+ //! \}
+
+ // TODO: These should be removed
+ inline void alloc(BaseReg& reg) { DebugUtils::unused(reg); }
+ inline void spill(BaseReg& reg) { DebugUtils::unused(reg); }
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::JumpAnnotation]
+// ============================================================================
+
+class JumpAnnotation {
+public:
+ ASMJIT_NONCOPYABLE(JumpAnnotation)
+
+ BaseCompiler* _compiler;
+ uint32_t _annotationId;
+ ZoneVector<uint32_t> _labelIds;
+
+ inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
+ : _compiler(compiler),
+ _annotationId(annotationId) {}
+
+ inline BaseCompiler* compiler() const noexcept { return _compiler; }
+ inline uint32_t annotationId() const noexcept { return _annotationId; }
+ const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
+
+ inline bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
+ inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
+
+ inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
+ inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
+};
+
+// ============================================================================
+// [asmjit::JumpNode]
+// ============================================================================
+
+//! Jump instruction with \ref JumpAnnotation.
+//!
+//! \note This node should only be used to represent a jump where the jump
+//! target cannot be deduced by examining the instruction's operands, for
+//! example when the jump target is a register or a memory location. This
+//! pattern is often used to perform indirect jumps that use a jump table,
+//! e.g. to implement a `switch` statement.
+class JumpNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(JumpNode)
+
+ JumpAnnotation* _annotation;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
+ : InstNode(cc, instId, options, opCount, kBaseOpCapacity),
+ _annotation(annotation) {
+ setType(kNodeJump);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool hasAnnotation() const noexcept { return _annotation != nullptr; }
+ inline JumpAnnotation* annotation() const noexcept { return _annotation; }
+ inline void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncNode]
+// ============================================================================
+
+//! Function entry (BaseCompiler).
+class FuncNode : public LabelNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncNode)
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+ //! Function frame.
+ FuncFrame _frame;
+ //! Function exit (label).
+ LabelNode* _exitNode;
+ //! Function end (sentinel).
+ SentinelNode* _end;
+ //! Arguments array as `VirtReg`.
+ VirtReg** _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncNode` instance.
+ //!
+ //! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
+ ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
+ : LabelNode(cb),
+ _funcDetail(),
+ _frame(),
+ _exitNode(nullptr),
+ _end(nullptr),
+ _args(nullptr) {
+ setType(kNodeFunc);
+ }
+
+ //! \}
+
+  //! \name Accessors
+  //! \{
+
+ //! Returns function exit `LabelNode`.
+ inline LabelNode* exitNode() const noexcept { return _exitNode; }
+ //! Returns function exit label.
+ inline Label exitLabel() const noexcept { return _exitNode->label(); }
+
+ //! Returns "End of Func" sentinel.
+ inline SentinelNode* endNode() const noexcept { return _end; }
+
+ //! Returns function declaration.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns function declaration.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns function frame.
+ inline FuncFrame& frame() noexcept { return _frame; }
+ //! Returns function frame.
+ inline const FuncFrame& frame() const noexcept { return _frame; }
+
+  //! Returns the number of function arguments.
+  inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+  //! Returns the number of function return values.
+  inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }
+
+  //! Returns the arguments list.
+  inline VirtReg** args() const noexcept { return _args; }
+
+ //! Returns argument at `i`.
+ inline VirtReg* arg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ return _args[i];
+ }
+
+ //! Sets argument at `i`.
+ inline void setArg(uint32_t i, VirtReg* vReg) noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ _args[i] = vReg;
+ }
+
+ //! Resets argument at `i`.
+ inline void resetArg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < argCount());
+ _args[i] = nullptr;
+ }
+
+ inline uint32_t attributes() const noexcept { return _frame.attributes(); }
+ inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncRetNode]
+// ============================================================================
+
+//! Function return (BaseCompiler).
+class FuncRetNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncRetNode)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncRetNode` instance.
+ inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
+ _any._nodeType = kNodeFuncRet;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncCallNode]
+// ============================================================================
+
+//! Function call (BaseCompiler).
+class FuncCallNode : public InstNode {
+public:
+ ASMJIT_NONCOPYABLE(FuncCallNode)
+
+ //! Function detail.
+ FuncDetail _funcDetail;
+  //! Return value operands.
+ Operand_ _rets[2];
+ //! Arguments.
+ Operand_* _args;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FuncCallNode` instance.
+ inline FuncCallNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
+ : InstNode(cb, instId, options, kBaseOpCapacity),
+ _funcDetail(),
+ _args(nullptr) {
+ setType(kNodeFuncCall);
+ _resetOps();
+ _rets[0].reset();
+ _rets[1].reset();
+ addFlags(kFlagIsRemovable);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets the function signature.
+ inline Error setSignature(const FuncSignature& sign) noexcept {
+ return _funcDetail.init(sign);
+ }
+
+ //! Returns the function detail.
+ inline FuncDetail& detail() noexcept { return _funcDetail; }
+ //! Returns the function detail.
+ inline const FuncDetail& detail() const noexcept { return _funcDetail; }
+
+ //! Returns the target operand.
+ inline Operand& target() noexcept { return _opArray[0].as<Operand>(); }
+ //! \overload
+ inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }
+
+ //! Returns the number of function arguments.
+ inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
+ //! Returns the number of function return values.
+ inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }
+
+ //! Returns the return value at `i`.
+ inline Operand& ret(uint32_t i = 0) noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return _rets[i].as<Operand>();
+ }
+ //! \overload
+ inline const Operand& ret(uint32_t i = 0) const noexcept {
+ ASMJIT_ASSERT(i < 2);
+ return _rets[i].as<Operand>();
+ }
+
+ //! Returns the function argument at `i`.
+ inline Operand& arg(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return _args[i].as<Operand>();
+ }
+ //! \overload
+ inline const Operand& arg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < kFuncArgCountLoHi);
+ return _args[i].as<Operand>();
+ }
+
+ //! Sets the function argument at `i` to `op`.
+ ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
+ //! Sets the function return value at `i` to `op`.
+ ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;
+
+ //! Sets the function argument at `i` to `reg`.
+ inline bool setArg(uint32_t i, const BaseReg& reg) noexcept { return _setArg(i, reg); }
+ //! Sets the function argument at `i` to `imm`.
+ inline bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
+
+  //! Sets the function return value at `i` to `reg`.
+ inline bool setRet(uint32_t i, const BaseReg& reg) noexcept { return _setRet(i, reg); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncPass]
+// ============================================================================
+
+class ASMJIT_VIRTAPI FuncPass : public Pass {
+public:
+ ASMJIT_NONCOPYABLE(FuncPass)
+ typedef Pass Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API FuncPass(const char* name) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated `BaseCompiler`.
+ inline BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
+
+ //! \}
+
+ //! \name Run
+ //! \{
+
+ //! Calls `runOnFunction()` on each `FuncNode` found.
+ ASMJIT_API Error run(Zone* zone, Logger* logger) noexcept override;
+
+ //! Called once per `FuncNode`.
+ virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept = 0;
+
+ //! \}
+};
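+
+// A minimal sketch of a concrete pass (not part of the original sources),
+// assuming `DebugUtils::unused()` and the `BaseBuilder::addPassT<T>()`
+// helper from the rest of the library:
+//
+// class CountFuncsPass : public FuncPass {
+// public:
+//   size_t _count = 0;
+//
+//   CountFuncsPass() noexcept : FuncPass("CountFuncsPass") {}
+//
+//   // Invoked by run() once for every FuncNode in the code.
+//   Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept override {
+//     DebugUtils::unused(zone, logger, func);
+//     _count++;
+//     return kErrorOk;
+//   }
+// };
+//
+// // Registered on a compiler: cc.addPassT<CountFuncsPass>();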
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_COMPILER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/constpool.cpp b/3rdparty/asmjit/src/asmjit/core/constpool.cpp
new file mode 100644
index 00000000000..4db68e2e6e9
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/constpool.cpp
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/constpool.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ConstPool - Construction / Destruction]
+// ============================================================================
+
+ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
+ConstPool::~ConstPool() noexcept {}
+
+// ============================================================================
+// [asmjit::ConstPool - Reset]
+// ============================================================================
+
+void ConstPool::reset(Zone* zone) noexcept {
+ _zone = zone;
+
+ size_t dataSize = 1;
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].reset();
+ _tree[i].setDataSize(dataSize);
+ _gaps[i] = nullptr;
+ dataSize <<= 1;
+ }
+
+ _gapPool = nullptr;
+ _size = 0;
+ _alignment = 0;
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Ops]
+// ============================================================================
+
+static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
+ ConstPool::Gap* gap = self->_gapPool;
+ if (!gap)
+ return self->_zone->allocT<ConstPool::Gap>();
+
+ self->_gapPool = gap->_next;
+ return gap;
+}
+
+static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
+ gap->_next = self->_gapPool;
+ self->_gapPool = gap;
+}
+
+static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
+ ASMJIT_ASSERT(size > 0);
+
+ while (size > 0) {
+ size_t gapIndex;
+ size_t gapSize;
+
+ if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
+ gapIndex = ConstPool::kIndex16;
+ gapSize = 16;
+ }
+ else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
+ gapIndex = ConstPool::kIndex8;
+ gapSize = 8;
+ }
+ else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
+ gapIndex = ConstPool::kIndex4;
+ gapSize = 4;
+ }
+ else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
+ gapIndex = ConstPool::kIndex2;
+ gapSize = 2;
+ }
+ else {
+ gapIndex = ConstPool::kIndex1;
+ gapSize = 1;
+ }
+
+ // We don't have to check for errors here. If the allocation failed, nothing
+ // really happened (the gap just won't be tracked) and the same allocation
+ // will fail again at a place where the failure generates a
+ // `kErrorOutOfMemory` error.
+ ConstPool::Gap* gap = ConstPool_allocGap(self);
+ if (!gap)
+ return;
+
+ gap->_next = self->_gaps[gapIndex];
+ self->_gaps[gapIndex] = gap;
+
+ gap->_offset = offset;
+ gap->_size = gapSize;
+
+ offset += gapSize;
+ size -= gapSize;
+ }
+}
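+
+// Worked example (not part of the original sources): ConstPool_addGap(self, 2, 14)
+// splits the 14-byte gap at offset 2 into the largest aligned pieces it can:
+//
+// offset 2, size 2 -> _gaps[kIndex2]
+// offset 4, size 4 -> _gaps[kIndex4]
+// offset 8, size 8 -> _gaps[kIndex8]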
+
+Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
+ size_t treeIndex;
+
+ if (size == 32)
+ treeIndex = kIndex32;
+ else if (size == 16)
+ treeIndex = kIndex16;
+ else if (size == 8)
+ treeIndex = kIndex8;
+ else if (size == 4)
+ treeIndex = kIndex4;
+ else if (size == 2)
+ treeIndex = kIndex2;
+ else if (size == 1)
+ treeIndex = kIndex1;
+ else
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ConstPool::Node* node = _tree[treeIndex].get(data);
+ if (node) {
+ dstOffset = node->_offset;
+ return kErrorOk;
+ }
+
+ // Before incrementing the current offset, check whether there is a gap
+ // that can be reused for the requested data.
+ size_t offset = ~size_t(0);
+ size_t gapIndex = treeIndex;
+
+ while (gapIndex != kIndexCount - 1) {
+ ConstPool::Gap* gap = _gaps[gapIndex];
+
+ // Check if there is a gap of this size; if so, reuse it and stop searching.
+ if (gap) {
+ size_t gapOffset = gap->_offset;
+ size_t gapSize = gap->_size;
+
+ // Destroy the gap for now.
+ _gaps[gapIndex] = gap->_next;
+ ConstPool_freeGap(this, gap);
+
+ offset = gapOffset;
+ ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
+
+ // Return the unused tail of the gap, which starts right after the data.
+ gapSize -= size;
+ if (gapSize > 0)
+ ConstPool_addGap(this, gapOffset + size, gapSize);
+
+ break;
+ }
+
+ gapIndex++;
+ }
+
+ if (offset == ~size_t(0)) {
+ // Compute how many bytes have to be skipped so the offset is aligned
+ // to `size`.
+ size_t diff = Support::alignUpDiff<size_t>(_size, size);
+
+ if (diff != 0) {
+ ConstPool_addGap(this, _size, diff);
+ _size += diff;
+ }
+
+ offset = _size;
+ _size += size;
+ }
+
+ // Add the initial node to the right index.
+ node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
+ if (!node) return DebugUtils::errored(kErrorOutOfMemory);
+
+ _tree[treeIndex].insert(node);
+ _alignment = Support::max<size_t>(_alignment, size);
+
+ dstOffset = offset;
+
+ // Now create shared constants based on the data pattern. We stop at a
+ // 4-byte size as it probably doesn't make sense to split constants down
+ // to 1 byte.
+ size_t pCount = 1;
+ while (size > 4) {
+ size >>= 1;
+ pCount <<= 1;
+
+ ASMJIT_ASSERT(treeIndex != 0);
+ treeIndex--;
+
+ const uint8_t* pData = static_cast<const uint8_t*>(data);
+ for (size_t i = 0; i < pCount; i++, pData += size) {
+ node = _tree[treeIndex].get(pData);
+ if (node) continue;
+
+ node = ConstPool::Tree::_newNode(_zone, pData, size, offset + (i * size), true);
+ if (ASMJIT_UNLIKELY(!node)) continue;
+
+ _tree[treeIndex].insert(node);
+ }
+ }
+
+ return kErrorOk;
+}
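+
+// Worked example (not part of the original sources): adding the 8-byte
+// constant 0x1122334455667788 at offset 0 also registers two shared 4-byte
+// nodes, one per half (offsets 0 and 4), so a later 4-byte lookup of either
+// half is served from existing storage instead of growing the pool.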
+
+// ============================================================================
+// [asmjit::ConstPool - Fill]
+// ============================================================================
+
+struct ConstPoolFill {
+ inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
+ _dst(dst),
+ _dataSize(dataSize) {}
+
+ inline void operator()(const ConstPool::Node* node) noexcept {
+ if (!node->_shared)
+ memcpy(_dst + node->_offset, node->data(), _dataSize);
+ }
+
+ uint8_t* _dst;
+ size_t _dataSize;
+};
+
+void ConstPool::fill(void* dst) const noexcept {
+ // Clears possible gaps; asmjit should never emit garbage to the output.
+ memset(dst, 0, _size);
+
+ ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
+ _tree[i].forEach(filler);
+ filler._dataSize <<= 1;
+ }
+}
+
+// ============================================================================
+// [asmjit::ConstPool - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(const_pool) {
+ Zone zone(32384 - Zone::kBlockOverhead);
+ ConstPool pool(&zone);
+
+ uint32_t i;
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
+
+ INFO("Adding %u constants to the pool.", kCount);
+ {
+ size_t prevOffset;
+ size_t curOffset;
+ uint64_t c = 0x0101010101010101u;
+
+ EXPECT(pool.add(&c, 8, prevOffset) == kErrorOk);
+ EXPECT(prevOffset == 0);
+
+ for (i = 1; i < kCount; i++) {
+ c++;
+ EXPECT(pool.add(&c, 8, curOffset) == kErrorOk);
+ EXPECT(prevOffset + 8 == curOffset);
+ EXPECT(pool.size() == (i + 1) * 8);
+ prevOffset = curOffset;
+ }
+
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Retrieving %u constants from the pool.", kCount);
+ {
+ uint64_t c = 0x0101010101010101u;
+
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Checking if the constants were split into 4-byte patterns");
+ {
+ uint32_t c = 0x01010101;
+ for (i = 0; i < kCount; i++) {
+ size_t offset;
+ EXPECT(pool.add(&c, 4, offset) == kErrorOk);
+ EXPECT(offset == i * 8);
+ c++;
+ }
+ }
+
+ INFO("Adding 2 byte constant to misalign the current offset");
+ {
+ uint16_t c = 0xFFFF;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Adding 8 byte constant to check if pool gets aligned again");
+ {
+ uint64_t c = 0xFFFFFFFFFFFFFFFFu;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 8, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 8);
+ }
+
+ INFO("Adding 2 byte constant to verify the gap is filled");
+ {
+ uint16_t c = 0xFFFE;
+ size_t offset;
+
+ EXPECT(pool.add(&c, 2, offset) == kErrorOk);
+ EXPECT(offset == kCount * 8 + 2);
+ EXPECT(pool.alignment() == 8);
+ }
+
+ INFO("Checking reset functionality");
+ {
+ pool.reset(&zone);
+ zone.reset();
+
+ EXPECT(pool.size() == 0);
+ EXPECT(pool.alignment() == 0);
+ }
+
+ INFO("Checking pool alignment when combined constants are added");
+ {
+ uint8_t bytes[32] = { 0 };
+ size_t offset;
+
+ pool.add(bytes, 1, offset);
+ EXPECT(pool.size() == 1);
+ EXPECT(pool.alignment() == 1);
+ EXPECT(offset == 0);
+
+ pool.add(bytes, 2, offset);
+ EXPECT(pool.size() == 4);
+ EXPECT(pool.alignment() == 2);
+ EXPECT(offset == 2);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 4, offset);
+ EXPECT(pool.size() == 8);
+ EXPECT(pool.alignment() == 4);
+ EXPECT(offset == 4);
+
+ pool.add(bytes, 32, offset);
+ EXPECT(pool.size() == 64);
+ EXPECT(pool.alignment() == 32);
+ EXPECT(offset == 32);
+ }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/constpool.h b/3rdparty/asmjit/src/asmjit/core/constpool.h
new file mode 100644
index 00000000000..259615fcaa8
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/constpool.h
@@ -0,0 +1,257 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::ConstPool]
+// ============================================================================
+
+//! Constant pool.
+class ConstPool {
+public:
+ ASMJIT_NONCOPYABLE(ConstPool)
+
+ //! Constant pool scope.
+ enum Scope : uint32_t {
+ //! Local constant, always embedded right after the current function.
+ kScopeLocal = 0,
+ //! Global constant, embedded at the end of the currently compiled code.
+ kScopeGlobal = 1
+ };
+
+ //! \cond INTERNAL
+
+ //! Index of a given size in const-pool table.
+ enum Index : uint32_t {
+ kIndex1 = 0,
+ kIndex2 = 1,
+ kIndex4 = 2,
+ kIndex8 = 3,
+ kIndex16 = 4,
+ kIndex32 = 5,
+ kIndexCount = 6
+ };
+
+ //! Zone-allocated const-pool gap created by two differently aligned constants.
+ struct Gap {
+ Gap* _next; //!< Pointer to the next gap.
+ size_t _offset; //!< Offset of the gap.
+ size_t _size; //!< Remaining bytes of the gap (basically the gap's size).
+ };
+
+ //! Zone-allocated const-pool node.
+ class Node : public ZoneTreeNodeT<Node> {
+ public:
+ ASMJIT_NONCOPYABLE(Node)
+
+ inline Node(size_t offset, bool shared) noexcept
+ : ZoneTreeNodeT<Node>(),
+ _shared(shared),
+ _offset(uint32_t(offset)) {}
+
+ inline void* data() const noexcept {
+ return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
+ }
+
+ uint32_t _shared : 1; //!< If this constant is shared with another.
+ uint32_t _offset; //!< Data offset from the beginning of the pool.
+ };
+
+ //! Data comparer used internally.
+ class Compare {
+ public:
+ inline Compare(size_t dataSize) noexcept
+ : _dataSize(dataSize) {}
+
+ inline int operator()(const Node& a, const Node& b) const noexcept {
+ return ::memcmp(a.data(), b.data(), _dataSize);
+ }
+
+ inline int operator()(const Node& a, const void* data) const noexcept {
+ return ::memcmp(a.data(), data, _dataSize);
+ }
+
+ size_t _dataSize;
+ };
+
+ //! Zone-allocated const-pool tree.
+ struct Tree {
+ inline explicit Tree(size_t dataSize = 0) noexcept
+ : _tree(),
+ _size(0),
+ _dataSize(dataSize) {}
+
+ inline void reset() noexcept {
+ _tree.reset();
+ _size = 0;
+ }
+
+ inline bool empty() const noexcept { return _size == 0; }
+ inline size_t size() const noexcept { return _size; }
+
+ inline void setDataSize(size_t dataSize) noexcept {
+ ASMJIT_ASSERT(empty());
+ _dataSize = dataSize;
+ }
+
+ inline Node* get(const void* data) noexcept {
+ Compare cmp(_dataSize);
+ return _tree.get(data, cmp);
+ }
+
+ inline void insert(Node* node) noexcept {
+ Compare cmp(_dataSize);
+ _tree.insert(node, cmp);
+ _size++;
+ }
+
+ template<typename Visitor>
+ inline void forEach(Visitor& visitor) const noexcept {
+ Node* node = _tree.root();
+ if (!node) return;
+
+ // Iterative in-order traversal - pending parents are kept on an explicit
+ // stack whose depth is bounded by `Globals::kMaxTreeHeight`.
+ Node* stack[Globals::kMaxTreeHeight];
+ size_t top = 0;
+
+ for (;;) {
+ // Descend to the leftmost node, remembering every parent passed.
+ Node* left = node->left();
+ if (left != nullptr) {
+ ASMJIT_ASSERT(top != Globals::kMaxTreeHeight);
+ stack[top++] = node;
+
+ node = left;
+ continue;
+ }
+
+ for (;;) {
+ // Visit the node, then either descend into its right subtree or pop
+ // the nearest pending parent.
+ visitor(node);
+ node = node->right();
+
+ if (node != nullptr)
+ break;
+
+ if (top == 0)
+ return;
+
+ node = stack[--top];
+ }
+ }
+ }
+
+ static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
+ Node* node = zone->allocT<Node>(sizeof(Node) + size);
+ if (ASMJIT_UNLIKELY(!node)) return nullptr;
+
+ node = new(node) Node(offset, shared);
+ memcpy(node->data(), data, size);
+ return node;
+ }
+
+ //! RB tree.
+ ZoneTree<Node> _tree;
+ //! Size of the tree (number of nodes).
+ size_t _size;
+ //! Size of the data.
+ size_t _dataSize;
+ };
+
+ //! \endcond
+
+ //! Zone allocator.
+ Zone* _zone;
+ //! Tree per size.
+ Tree _tree[kIndexCount];
+ //! Gaps per size.
+ Gap* _gaps[kIndexCount];
+ //! Pool of released `Gap` structures kept for reuse.
+ Gap* _gapPool;
+
+ //! Size of the pool (in bytes).
+ size_t _size;
+ //! Required pool alignment.
+ size_t _alignment;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API ConstPool(Zone* zone) noexcept;
+ ASMJIT_API ~ConstPool() noexcept;
+
+ ASMJIT_API void reset(Zone* zone) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the constant-pool is empty.
+ inline bool empty() const noexcept { return _size == 0; }
+ //! Returns the size of the constant-pool in bytes.
+ inline size_t size() const noexcept { return _size; }
+ //! Returns minimum alignment.
+ inline size_t alignment() const noexcept { return _alignment; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Adds a constant to the constant pool.
+ //!
+ //! The constant must have a known size, which is 1, 2, 4, 8, 16 or 32 bytes.
+ //! The constant is added to the pool only if it doesn't exist yet, otherwise
+ //! the cached offset is returned.
+ //!
+ //! AsmJit is able to subdivide added constants, so for example if you add
+ //! 8-byte constant 0x1122334455667788 it will create the following slots:
+ //!
+ //! 8-byte: 0x1122334455667788
+ //! 4-byte: 0x11223344, 0x55667788
+ //!
+ //! The reason is that when combining MMX/SSE/AVX code some patterns are used
+ //! frequently. However, AsmJit is not able to reallocate a constant that has
+ //! already been added. For example if you try to add a 4-byte constant and
+ //! then an 8-byte constant having the same 4-byte pattern as the previous
+ //! one, two independent slots will be generated by the pool.
+ ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
+
+ //! Fills the destination with the content of this constant pool.
+ ASMJIT_API void fill(void* dst) const noexcept;
+};
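+
+// A minimal usage sketch (not part of the original sources):
+//
+// Zone zone(1024);
+// ConstPool pool(&zone);
+//
+// uint64_t c = 0x1122334455667788u;
+// size_t offset;
+// pool.add(&c, 8, offset); // offset == 0, pool.size() == 8
+// pool.add(&c, 8, offset); // deduplicated, offset == 0 again
+//
+// uint8_t* buf = static_cast<uint8_t*>(::malloc(pool.size()));
+// pool.fill(buf); // writes the constants, zeroes any gaps
+// ::free(buf);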
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CONSTPOOL_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp b/3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp
new file mode 100644
index 00000000000..edc7d172227
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/cpuinfo.cpp
@@ -0,0 +1,97 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/cpuinfo.h"
+
+#if !defined(_WIN32)
+ #include <errno.h>
+ #include <sys/utsname.h>
+ #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU NumThreads]
+// ============================================================================
+
+#if defined(_WIN32)
+static inline uint32_t detectHWThreadCount() noexcept {
+ SYSTEM_INFO info;
+ ::GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+#elif defined(_SC_NPROCESSORS_ONLN)
+static inline uint32_t detectHWThreadCount() noexcept {
+ long res = ::sysconf(_SC_NPROCESSORS_ONLN);
+ return res <= 0 ? uint32_t(1) : uint32_t(res);
+}
+#else
+static inline uint32_t detectHWThreadCount() noexcept {
+ return 1;
+}
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - CPU Features]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+namespace x86 { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+namespace arm { void detectCpu(CpuInfo& cpu) noexcept; }
+#endif
+
+// ============================================================================
+// [asmjit::CpuInfo - Detect - Static Initializer]
+// ============================================================================
+
+static uint32_t cpuInfoInitialized;
+static CpuInfo cpuInfoGlobal(Globals::NoInit);
+
+const CpuInfo& CpuInfo::host() noexcept {
+ // Initialization is racy by design: multiple threads may detect the CPU
+ // concurrently, but that never causes a problem as the resulting
+ // information is always the same.
+ if (!cpuInfoInitialized) {
+ CpuInfo cpuInfoLocal;
+
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+ x86::detectCpu(cpuInfoLocal);
+#endif
+
+#if defined(ASMJIT_BUILD_ARM) && ASMJIT_ARCH_ARM
+ arm::detectCpu(cpuInfoLocal);
+#endif
+
+ cpuInfoLocal._hwThreadCount = detectHWThreadCount();
+ cpuInfoGlobal = cpuInfoLocal;
+ cpuInfoInitialized = 1;
+ }
+
+ return cpuInfoGlobal;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/cpuinfo.h b/3rdparty/asmjit/src/asmjit/core/cpuinfo.h
new file mode 100644
index 00000000000..d2defb90ed6
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/cpuinfo.h
@@ -0,0 +1,152 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
+#define ASMJIT_CORE_CPUINFO_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/features.h"
+#include "../core/globals.h"
+#include "../core/string.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+// ============================================================================
+// [asmjit::CpuInfo]
+// ============================================================================
+
+//! CPU information.
+class CpuInfo {
+public:
+ //! CPU architecture information.
+ ArchInfo _archInfo;
+ //! CPU family ID.
+ uint32_t _familyId;
+ //! CPU model ID.
+ uint32_t _modelId;
+ //! CPU brand ID.
+ uint32_t _brandId;
+ //! CPU stepping.
+ uint32_t _stepping;
+ //! Processor type.
+ uint32_t _processorType;
+ //! Maximum number of addressable IDs for logical processors.
+ uint32_t _maxLogicalProcessors;
+ //! Cache line size (in bytes).
+ uint32_t _cacheLineSize;
+ //! Number of hardware threads.
+ uint32_t _hwThreadCount;
+
+ //! CPU vendor string.
+ FixedString<16> _vendor;
+ //! CPU brand string.
+ FixedString<64> _brand;
+ //! CPU features.
+ BaseFeatures _features;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline CpuInfo() noexcept { reset(); }
+ inline CpuInfo(const CpuInfo& other) noexcept = default;
+
+ inline explicit CpuInfo(Globals::NoInit_) noexcept
+ : _archInfo(Globals::NoInit),
+ _features(Globals::NoInit) {}
+
+ //! Returns the host CPU information.
+ ASMJIT_API static const CpuInfo& host() noexcept;
+
+ //! Initializes CpuInfo to the given architecture, see `ArchInfo`.
+ inline void initArch(uint32_t archId, uint32_t archMode = 0) noexcept {
+ _archInfo.init(archId, archMode);
+ }
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline CpuInfo& operator=(const CpuInfo& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the CPU architecture information.
+ inline const ArchInfo& archInfo() const noexcept { return _archInfo; }
+ //! Returns the CPU architecture id, see `ArchInfo::Id`.
+ inline uint32_t archId() const noexcept { return _archInfo.archId(); }
+ //! Returns the CPU architecture sub-id, see `ArchInfo::SubId`.
+ inline uint32_t archSubId() const noexcept { return _archInfo.archSubId(); }
+
+ //! Returns the CPU family ID.
+ inline uint32_t familyId() const noexcept { return _familyId; }
+ //! Returns the CPU model ID.
+ inline uint32_t modelId() const noexcept { return _modelId; }
+ //! Returns the CPU brand ID.
+ inline uint32_t brandId() const noexcept { return _brandId; }
+ //! Returns the CPU stepping.
+ inline uint32_t stepping() const noexcept { return _stepping; }
+ //! Returns the processor type.
+ inline uint32_t processorType() const noexcept { return _processorType; }
+ //! Returns the number of maximum logical processors.
+ inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
+
+ //! Returns the size of a cache line flush.
+ inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
+ //! Returns the number of hardware threads available.
+ inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
+
+ //! Returns the CPU vendor.
+ inline const char* vendor() const noexcept { return _vendor.str; }
+ //! Tests whether the CPU vendor is equal to `s`.
+ inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
+
+ //! Returns the CPU brand string.
+ inline const char* brand() const noexcept { return _brand.str; }
+
+ //! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
+ //! if needed.
+ template<typename T = BaseFeatures>
+ inline const T& features() const noexcept { return _features.as<T>(); }
+
+ //! Tests whether the CPU has the given `feature`.
+ inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
+ //! Adds the given CPU `feature` to the list of this CpuInfo features.
+ inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
+
+ //! \}
+};
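+
+// A minimal usage sketch (not part of the original sources):
+//
+// const CpuInfo& cpu = CpuInfo::host();
+// printf("Vendor: %s, HW threads: %u\n", cpu.vendor(), cpu.hwThreadCount());
+//
+// // Feature IDs are architecture-specific; on x86 builds one would test
+// // for example cpu.hasFeature(x86::Features::kAVX2).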
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_CPUINFO_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/datatypes.h b/3rdparty/asmjit/src/asmjit/core/datatypes.h
new file mode 100644
index 00000000000..bee4572294c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/datatypes.h
@@ -0,0 +1,1073 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_DATATYPES_H_INCLUDED
+#define ASMJIT_CORE_DATATYPES_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+// ============================================================================
+// [asmjit::Data64]
+// ============================================================================
+
+//! 64-bit data useful for creating SIMD constants.
+union Data64 {
+ //! Array of eight 8-bit signed integers.
+ int8_t sb[8];
+ //! Array of eight 8-bit unsigned integers.
+ uint8_t ub[8];
+ //! Array of four 16-bit signed integers.
+ int16_t sw[4];
+ //! Array of four 16-bit unsigned integers.
+ uint16_t uw[4];
+ //! Array of two 32-bit signed integers.
+ int32_t sd[2];
+ //! Array of two 32-bit unsigned integers.
+ uint32_t ud[2];
+ //! Array of one 64-bit signed integer.
+ int64_t sq[1];
+ //! Array of one 64-bit unsigned integer.
+ uint64_t uq[1];
+
+ //! Array of two SP-FP values.
+ float sf[2];
+ //! Array of one DP-FP value.
+ double df[1];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(int8_t x0) noexcept {
+ Data64 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(uint8_t x0) noexcept {
+ Data64 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ static inline Data64 fromI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ Data64 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ static inline Data64 fromU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ Data64 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0) noexcept {
+ Data64 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0) noexcept {
+ Data64 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ static inline Data64 fromI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ Data64 self;
+ self.setI16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ static inline Data64 fromU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ Data64 self;
+ self.setU16(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0) noexcept {
+ Data64 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0) noexcept {
+ Data64 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ static inline Data64 fromI32(int32_t x0, int32_t x1) noexcept {
+ Data64 self;
+ self.setI32(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ static inline Data64 fromU32(uint32_t x0, uint32_t x1) noexcept {
+ Data64 self;
+ self.setU32(x0, x1);
+ return self;
+ }
+
+ //! Sets 64-bit signed integer.
+ static inline Data64 fromI64(int64_t x0) noexcept {
+ Data64 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ static inline Data64 fromU64(uint64_t x0) noexcept {
+ Data64 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0) noexcept {
+ Data64 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all two SP-FP values.
+ static inline Data64 fromF32(float x0, float x1) noexcept {
+ Data64 self;
+ self.setF32(x0, x1);
+ return self;
+ }
+
+ //! Sets one DP-FP value.
+ static inline Data64 fromF64(double x0) noexcept {
+ Data64 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all eight 8-bit signed integers.
+ inline void setI8(
+ int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept {
+
+ sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3;
+ sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7;
+ }
+
+ //! Sets all eight 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept {
+
+ ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3;
+ ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7;
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ }
+ }
+
+ //! Sets all four 16-bit signed integers.
+ inline void setI16(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept {
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ }
+
+ //! Sets all four 16-bit unsigned integers.
+ inline void setU16(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept {
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ sd[0] = x0; sd[1] = x0;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ ud[0] = x0; ud[1] = x0;
+ }
+
+ //! Sets all two 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1) noexcept {
+ sd[0] = x0; sd[1] = x1;
+ }
+
+ //! Sets all two 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1) noexcept {
+ ud[0] = x0; ud[1] = x1;
+ }
+
+ //! Sets 64-bit signed integer.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0;
+ }
+
+ //! Sets 64-bit unsigned integer.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0;
+ }
+
+ //! Sets all two SP-FP values.
+ inline void setF32(float x0, float x1) noexcept {
+ sf[0] = x0; sf[1] = x1;
+ }
+
+ //! Sets one DP-FP value.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0;
+ }
+};
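+
+// A minimal usage sketch (not part of the original sources). The single-value
+// overloads broadcast via multiplication (e.g. x * 0x0101010101010101)
+// instead of a per-lane loop:
+//
+// Data64 mask = Data64::fromU8(0x80u); // ub[0..7] all == 0x80
+// Data64 pair = Data64::fromU32(1u, 2u); // ud[0] == 1, ud[1] == 2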
+
+// ============================================================================
+// [asmjit::Data128]
+// ============================================================================
+
+//! 128-bit data useful for creating SIMD constants.
+union Data128 {
+ //! Array of sixteen 8-bit signed integers.
+ int8_t sb[16];
+ //! Array of sixteen 8-bit unsigned integers.
+ uint8_t ub[16];
+ //! Array of eight 16-bit signed integers.
+ int16_t sw[8];
+ //! Array of eight 16-bit unsigned integers.
+ uint16_t uw[8];
+ //! Array of four 32-bit signed integers.
+ int32_t sd[4];
+ //! Array of four 32-bit unsigned integers.
+ uint32_t ud[4];
+ //! Array of two 64-bit signed integers.
+ int64_t sq[2];
+ //! Array of two 64-bit unsigned integers.
+ uint64_t uq[2];
+
+ //! Array of four 32-bit single precision floating points.
+ float sf[4];
+ //! Array of two 64-bit double precision floating points.
+ double df[2];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(int8_t x0) noexcept {
+ Data128 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(uint8_t x0) noexcept {
+ Data128 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ static inline Data128 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ Data128 self;
+ self.setI8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ static inline Data128 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ Data128 self;
+ self.setU8(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(int16_t x0) noexcept {
+ Data128 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(uint16_t x0) noexcept {
+ Data128 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ static inline Data128 fromI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ Data128 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ static inline Data128 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ Data128 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0) noexcept {
+ Data128 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0) noexcept {
+ Data128 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ static inline Data128 fromI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ Data128 self;
+ self.setI32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ static inline Data128 fromU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ Data128 self;
+ self.setU32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0) noexcept {
+ Data128 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0) noexcept {
+ Data128 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ static inline Data128 fromI64(int64_t x0, int64_t x1) noexcept {
+ Data128 self;
+ self.setI64(x0, x1);
+ return self;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ static inline Data128 fromU64(uint64_t x0, uint64_t x1) noexcept {
+ Data128 self;
+ self.setU64(x0, x1);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0) noexcept {
+ Data128 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all four SP-FP floats.
+ static inline Data128 fromF32(float x0, float x1, float x2, float x3) noexcept {
+ Data128 self;
+ self.setF32(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0) noexcept {
+ Data128 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all two DP-FP floats.
+ static inline Data128 fromF64(double x0, double x1) noexcept {
+ Data128 self;
+ self.setF64(x0, x1);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all sixteen 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ }
+
+ //! Sets all sixteen 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ uq[1] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ }
+ }
+
+ //! Sets all eight 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept {
+
+ sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3;
+ sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7;
+ }
+
+ //! Sets all eight 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept {
+
+ uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3;
+ uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7;
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t t = (uint64_t(x0) << 32) + x0;
+ uq[0] = t;
+ uq[1] = t;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ }
+ }
+
+ //! Sets all four 32-bit signed integers.
+ inline void setI32(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept {
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ }
+
+ //! Sets all four 32-bit unsigned integers.
+ inline void setU32(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept {
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0;
+ }
+
+ //! Sets all two 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1) noexcept {
+ sq[0] = x0; sq[1] = x1;
+ }
+
+ //! Sets all two 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1) noexcept {
+ uq[0] = x0; uq[1] = x1;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ }
+
+ //! Sets all four SP-FP floats.
+ inline void setF32(float x0, float x1, float x2, float x3) noexcept {
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0;
+ }
+
+ //! Sets all two DP-FP floats.
+ inline void setF64(double x0, double x1) noexcept {
+ df[0] = x0; df[1] = x1;
+ }
+};
+
+// ============================================================================
+// [asmjit::Data256]
+// ============================================================================
+
+//! 256-bit data useful for creating SIMD constants.
+union Data256 {
+ //! Array of thirty two 8-bit signed integers.
+ int8_t sb[32];
+ //! Array of thirty two 8-bit unsigned integers.
+ uint8_t ub[32];
+ //! Array of sixteen 16-bit signed integers.
+ int16_t sw[16];
+ //! Array of sixteen 16-bit unsigned integers.
+ uint16_t uw[16];
+ //! Array of eight 32-bit signed integers.
+ int32_t sd[8];
+ //! Array of eight 32-bit unsigned integers.
+ uint32_t ud[8];
+ //! Array of four 64-bit signed integers.
+ int64_t sq[4];
+ //! Array of four 64-bit unsigned integers.
+ uint64_t uq[4];
+
+ //! Array of eight 32-bit single precision floating points.
+ float sf[8];
+ //! Array of four 64-bit double precision floating points.
+ double df[4];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(int8_t x0) noexcept {
+ Data256 self;
+ self.setI8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(uint8_t x0) noexcept {
+ Data256 self;
+ self.setU8(x0);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ static inline Data256 fromI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ Data256 self;
+ self.setI8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ static inline Data256 fromU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ Data256 self;
+ self.setU8(
+ x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(int16_t x0) noexcept {
+ Data256 self;
+ self.setI16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(uint16_t x0) noexcept {
+ Data256 self;
+ self.setU16(x0);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ static inline Data256 fromI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 ,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ Data256 self;
+ self.setI16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ static inline Data256 fromU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 ,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ Data256 self;
+ self.setU16(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(int32_t x0) noexcept {
+ Data256 self;
+ self.setI32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(uint32_t x0) noexcept {
+ Data256 self;
+ self.setU32(x0);
+ return self;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ static inline Data256 fromI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ Data256 self;
+ self.setI32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ static inline Data256 fromU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ Data256 self;
+ self.setU32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0) noexcept {
+ Data256 self;
+ self.setI64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0) noexcept {
+ Data256 self;
+ self.setU64(x0);
+ return self;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ static inline Data256 fromI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ Data256 self;
+ self.setI64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ static inline Data256 fromU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ Data256 self;
+ self.setU64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(float x0) noexcept {
+ Data256 self;
+ self.setF32(x0);
+ return self;
+ }
+
+ //! Sets all eight SP-FP floats.
+ static inline Data256 fromF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ Data256 self;
+ self.setF32(x0, x1, x2, x3, x4, x5, x6, x7);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0) noexcept {
+ Data256 self;
+ self.setF64(x0);
+ return self;
+ }
+
+ //! Sets all four DP-FP floats.
+ static inline Data256 fromF64(double x0, double x1, double x2, double x3) noexcept {
+ Data256 self;
+ self.setF64(x0, x1, x2, x3);
+ return self;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(int8_t x0) noexcept {
+ setU8(uint8_t(x0));
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(uint8_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0101010101010101u;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x01010101u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ ud[4] = xd;
+ ud[5] = xd;
+ ud[6] = xd;
+ ud[7] = xd;
+ }
+ }
+
+ //! Sets all thirty two 8-bit signed integers.
+ inline void setI8(
+ int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 ,
+ int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 ,
+ int8_t x8 , int8_t x9 , int8_t x10, int8_t x11,
+ int8_t x12, int8_t x13, int8_t x14, int8_t x15,
+ int8_t x16, int8_t x17, int8_t x18, int8_t x19,
+ int8_t x20, int8_t x21, int8_t x22, int8_t x23,
+ int8_t x24, int8_t x25, int8_t x26, int8_t x27,
+ int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept {
+
+ sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ;
+ sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ;
+ sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11;
+ sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15;
+ sb[16] = x16; sb[17] = x17; sb[18] = x18; sb[19] = x19;
+ sb[20] = x20; sb[21] = x21; sb[22] = x22; sb[23] = x23;
+ sb[24] = x24; sb[25] = x25; sb[26] = x26; sb[27] = x27;
+ sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31;
+ }
+
+ //! Sets all thirty two 8-bit unsigned integers.
+ inline void setU8(
+ uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 ,
+ uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 ,
+ uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11,
+ uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15,
+ uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19,
+ uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23,
+ uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27,
+ uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept {
+
+ ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ;
+ ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ;
+ ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11;
+ ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15;
+ ub[16] = x16; ub[17] = x17; ub[18] = x18; ub[19] = x19;
+ ub[20] = x20; ub[21] = x21; ub[22] = x22; ub[23] = x23;
+ ub[24] = x24; ub[25] = x25; ub[26] = x26; ub[27] = x27;
+ ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31;
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(int16_t x0) noexcept {
+ setU16(uint16_t(x0));
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ inline void setU16(uint16_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = uint64_t(x0) * 0x0001000100010001u;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ uint32_t xd = uint32_t(x0) * 0x00010001u;
+ ud[0] = xd;
+ ud[1] = xd;
+ ud[2] = xd;
+ ud[3] = xd;
+ ud[4] = xd;
+ ud[5] = xd;
+ ud[6] = xd;
+ ud[7] = xd;
+ }
+ }
+
+ //! Sets all sixteen 16-bit signed integers.
+ inline void setI16(
+ int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7,
+ int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept {
+
+ sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ;
+ sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ;
+ sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11;
+ sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15;
+ }
+
+ //! Sets all sixteen 16-bit unsigned integers.
+ inline void setU16(
+ uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7,
+ uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept {
+
+ uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ;
+ uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ;
+ uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11;
+ uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15;
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(int32_t x0) noexcept {
+ setU32(uint32_t(x0));
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(uint32_t x0) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ uint64_t xq = (uint64_t(x0) << 32) + x0;
+ uq[0] = xq;
+ uq[1] = xq;
+ uq[2] = xq;
+ uq[3] = xq;
+ }
+ else {
+ ud[0] = x0;
+ ud[1] = x0;
+ ud[2] = x0;
+ ud[3] = x0;
+ ud[4] = x0;
+ ud[5] = x0;
+ ud[6] = x0;
+ ud[7] = x0;
+ }
+ }
+
+ //! Sets all eight 32-bit signed integers.
+ inline void setI32(
+ int32_t x0, int32_t x1, int32_t x2, int32_t x3,
+ int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept {
+
+ sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3;
+ sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7;
+ }
+
+ //! Sets all eight 32-bit unsigned integers.
+ inline void setU32(
+ uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3,
+ uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept {
+
+ ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3;
+ ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0) noexcept {
+ sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0) noexcept {
+ uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0;
+ }
+
+ //! Sets all four 64-bit signed integers.
+ inline void setI64(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept {
+ sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3;
+ }
+
+ //! Sets all four 64-bit unsigned integers.
+ inline void setU64(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept {
+ uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(float x0) noexcept {
+ sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0;
+ sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0;
+ }
+
+ //! Sets all eight SP-FP floats.
+ inline void setF32(
+ float x0, float x1, float x2, float x3,
+ float x4, float x5, float x6, float x7) noexcept {
+
+ sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3;
+ sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0) noexcept {
+ df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0;
+ }
+
+ //! Sets all four DP-FP floats.
+ inline void setF64(double x0, double x1, double x2, double x3) noexcept {
+ df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3;
+ }
+
+ //! \}
+};
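+
+// A minimal usage sketch (assuming this 256-bit structure is asmjit's
+// `Data256`, as its `uq`/`ud`/`uw` lane arrays suggest):
+//
+//   Data256 d;
+//   d.setU16(uint16_t(0xCAFEu));  // Broadcasts 0xCAFE to all sixteen 16-bit lanes.
+//   d.setF64(1.0, 2.0, 3.0, 4.0); // Assigns the four DP-FP lanes directly.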
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_DATATYPES_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/emitter.cpp b/3rdparty/asmjit/src/asmjit/core/emitter.cpp
new file mode 100644
index 00000000000..ebf8c179008
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/emitter.cpp
@@ -0,0 +1,272 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/logging.h"
+#include "../core/support.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86internal_p.h"
+ #include "../x86/x86instdb_p.h"
+#endif // ASMJIT_BUILD_X86
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/arminternal_p.h"
+ #include "../arm/arminstdb.h"
+#endif // ASMJIT_BUILD_ARM
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::BaseEmitter - Construction / Destruction]
+// ============================================================================
+
+BaseEmitter::BaseEmitter(uint32_t type) noexcept
+ : _type(uint8_t(type)),
+ _reserved(0),
+ _flags(0),
+ _emitterOptions(0),
+ _code(nullptr),
+ _errorHandler(nullptr),
+ _codeInfo(),
+ _gpRegInfo(),
+ _privateData(0),
+ _instOptions(0),
+ _globalInstOptions(BaseInst::kOptionReserved),
+ _extraReg(),
+ _inlineComment(nullptr) {}
+
+BaseEmitter::~BaseEmitter() noexcept {
+ if (_code) {
+ _addFlags(kFlagDestroyed);
+ _code->detach(this);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Code-Generation]
+// ============================================================================
+
+Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t count) {
+ const Operand_* op = operands;
+ const Operand& none_ = Globals::none;
+
+ switch (count) {
+ case 0: return _emit(instId, none_, none_, none_, none_);
+ case 1: return _emit(instId, op[0], none_, none_, none_);
+ case 2: return _emit(instId, op[0], op[1], none_, none_);
+ case 3: return _emit(instId, op[0], op[1], op[2], none_);
+ case 4: return _emit(instId, op[0], op[1], op[2], op[3]);
+ case 5: return _emit(instId, op[0], op[1], op[2], op[3], op[4], none_);
+ case 6: return _emit(instId, op[0], op[1], op[2], op[3], op[4], op[5]);
+ default: return DebugUtils::errored(kErrorInvalidArgument);
+ }
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Label Management]
+// ============================================================================
+
+Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
+ return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Finalize]
+// ============================================================================
+
+Error BaseEmitter::finalize() {
+ // Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Error Handling]
+// ============================================================================
+
+Error BaseEmitter::reportError(Error err, const char* message) {
+ ErrorHandler* handler = errorHandler();
+ if (!handler) {
+ if (code())
+ handler = code()->errorHandler();
+ }
+
+ if (handler) {
+ if (!message)
+ message = DebugUtils::errorAsString(err);
+ handler->handleError(err, message, this);
+ }
+
+ return err;
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Label Management]
+// ============================================================================
+
+bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
+ return _code && labelId < _code->labelCount();
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Emit (High-Level)]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (archInfo().isX86Family())
+ return x86::X86Internal::emitProlog(as<x86::Emitter>(), frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (archInfo().isArmFamily())
+ return arm::ArmInternal::emitProlog(as<arm::Emitter>(), frame);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (archInfo().isX86Family())
+ return x86::X86Internal::emitEpilog(as<x86::Emitter>(), frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (archInfo().isArmFamily())
+ return arm::ArmInternal::emitEpilog(as<arm::Emitter>(), frame);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifdef ASMJIT_BUILD_X86
+ if (archInfo().isX86Family())
+ return x86::X86Internal::emitArgsAssignment(as<x86::Emitter>(), frame, args);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (archInfo().isArmFamily())
+ return arm::ArmInternal::emitArgsAssignment(as<arm::Emitter>(), frame, args);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Comment]
+// ============================================================================
+
+Error BaseEmitter::commentf(const char* fmt, ...) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_LOGGING
+ StringTmp<1024> sb;
+
+ va_list ap;
+ va_start(ap, fmt);
+ Error err = sb.appendVFormat(fmt, ap);
+ va_end(ap);
+
+ if (ASMJIT_UNLIKELY(err))
+ return err;
+
+ return comment(sb.data(), sb.size());
+#else
+ DebugUtils::unused(fmt);
+ return kErrorOk;
+#endif
+}
+
+Error BaseEmitter::commentv(const char* fmt, va_list ap) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+#ifndef ASMJIT_NO_LOGGING
+ StringTmp<1024> sb;
+
+ Error err = sb.appendVFormat(fmt, ap);
+ if (ASMJIT_UNLIKELY(err))
+ return err;
+
+ return comment(sb.data(), sb.size());
+#else
+ DebugUtils::unused(fmt, ap);
+ return kErrorOk;
+#endif
+}
+
+// ============================================================================
+// [asmjit::BaseEmitter - Events]
+// ============================================================================
+
+Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
+ _code = code;
+ _codeInfo = code->codeInfo();
+ _emitterOptions = code->emitterOptions();
+
+ onUpdateGlobalInstOptions();
+ return kErrorOk;
+}
+
+Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
+ DebugUtils::unused(code);
+
+ _flags = 0;
+ _emitterOptions = 0;
+ _errorHandler = nullptr;
+
+ _codeInfo.reset();
+ _gpRegInfo.reset();
+ _privateData = 0;
+
+ _instOptions = 0;
+ _globalInstOptions = BaseInst::kOptionReserved;
+ _extraReg.reset();
+ _inlineComment = nullptr;
+
+ return kErrorOk;
+}
+
+void BaseEmitter::onUpdateGlobalInstOptions() noexcept {
+ constexpr uint32_t kCriticalEmitterOptions =
+ kOptionLoggingEnabled |
+ kOptionStrictValidation ;
+
+ _globalInstOptions &= ~BaseInst::kOptionReserved;
+ if ((_emitterOptions & kCriticalEmitterOptions) != 0)
+ _globalInstOptions |= BaseInst::kOptionReserved;
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/emitter.h b/3rdparty/asmjit/src/asmjit/core/emitter.h
new file mode 100644
index 00000000000..585558457bd
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/emitter.h
@@ -0,0 +1,554 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
+#define ASMJIT_CORE_EMITTER_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/inst.h"
+#include "../core/operand.h"
+#include "../core/codeholder.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class ConstPool;
+class FuncFrame;
+class FuncArgsAssignment;
+
+// ============================================================================
+// [asmjit::BaseEmitter]
+// ============================================================================
+
+//! Provides a base foundation to emit code - specialized by `BaseAssembler`
+//! and `BaseBuilder`.
+class ASMJIT_VIRTAPI BaseEmitter {
+public:
+ ASMJIT_BASE_CLASS(BaseEmitter)
+
+ //! See `EmitterType`.
+ uint8_t _type;
+ //! Reserved for future use.
+ uint8_t _reserved;
+ //! See \ref BaseEmitter::Flags.
+ uint16_t _flags;
+ //! Emitter options, always in sync with CodeHolder.
+ uint32_t _emitterOptions;
+
+ //! CodeHolder the BaseEmitter is attached to.
+ CodeHolder* _code;
+ //! Attached `ErrorHandler`.
+ ErrorHandler* _errorHandler;
+
+ //! Basic information about the code (matches CodeHolder::_codeInfo).
+ CodeInfo _codeInfo;
+ //! Native GP register signature and signature related information.
+ RegInfo _gpRegInfo;
+ //! Internal private data used freely by any emitter.
+ uint32_t _privateData;
+
+ //! Next instruction options (affects the next instruction).
+ uint32_t _instOptions;
+ //! Global Instruction options (combined with `_instOptions` by `emit...()`).
+ uint32_t _globalInstOptions;
+ //! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
+ RegOnly _extraReg;
+ //! Inline comment of the next instruction (affects the next instruction).
+ const char* _inlineComment;
+
+ //! Emitter type.
+ enum EmitterType : uint32_t {
+ //! Unknown or uninitialized.
+ kTypeNone = 0,
+ //! Emitter inherits from `BaseAssembler`.
+ kTypeAssembler = 1,
+ //! Emitter inherits from `BaseBuilder`.
+ kTypeBuilder = 2,
+ //! Emitter inherits from `BaseCompiler`.
+ kTypeCompiler = 3,
+ //! Count of emitter types.
+ kTypeCount = 4
+ };
+
+ //! Emitter flags.
+ enum Flags : uint32_t {
+ //! The emitter was finalized.
+ kFlagFinalized = 0x4000u,
+ //! The emitter was destroyed.
+ kFlagDestroyed = 0x8000u
+ };
+
+ //! Emitter options.
+ enum Options : uint32_t {
+ //! Logging is enabled, `BaseEmitter::logger()` must return a valid logger.
+ //! This option is set automatically by the emitter if the logger is present.
+ //! User code should never alter this value.
+ //!
+ //! Default `false`.
+ kOptionLoggingEnabled = 0x00000001u,
+
+ //! Strictly validate each instruction before it's emitted.
+ //!
+ //! Default `false`.
+ kOptionStrictValidation = 0x00000002u,
+
+ //! Emit instructions that are optimized for size, if possible.
+ //!
+ //! Default `false`.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! When this option is set, the assembler will try to replace emitted
+ //! instructions, where possible, with operation-equivalent instructions that
+ //! take fewer bytes, by taking advantage of implicit zero extension. For
+ //! example, instructions like `mov r64, imm` and `and r64, imm` can be
+ //! translated to `mov r32, imm` and `and r32, imm` when the immediate
+ //! constant is less than `2^31`.
+ kOptionOptimizedForSize = 0x00000004u,
+
+ //! Emit optimized code-alignment sequences.
+ //!
+ //! Default `false`.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! The default align sequence used by the X86 architecture is a one-byte
+ //! (0x90) opcode that is often shown by disassemblers as NOP. However,
+ //! there are more optimized align sequences for 2-11 bytes that may execute
+ //! faster on certain CPUs. If this feature is enabled AsmJit will generate
+ //! specialized sequences for alignment between 2 and 11 bytes.
+ kOptionOptimizedAlign = 0x00000008u,
+
+ //! Emit jump-prediction hints.
+ //!
+ //! Default `false`.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! Jump prediction is usually based on the direction of the jump. If the
+ //! jump is backward it is usually predicted as taken; and if the jump is
+ //! forward it is usually predicted as not-taken. The reason is that loops
+ //! generally use backward jumps and conditions usually use forward jumps.
+ //! However this behavior can be overridden by using instruction prefixes.
+ //! If this option is enabled these hints will be emitted.
+ //!
+ //! This feature is disabled by default, because the only processor that
+ //! used to take into consideration prediction hints was P4. Newer processors
+ //! implement heuristics for branch prediction and ignore static hints. This
+ //! means that this feature can be used for annotation purposes.
+ kOptionPredictedJumps = 0x00000010u
+ };
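+
+ // A minimal sketch of enabling these options (`a` is assumed to be an
+ // emitter attached to a CodeHolder; `addEmitterOptions()` is declared
+ // later in this class):
+ //
+ //   a.addEmitterOptions(BaseEmitter::kOptionStrictValidation |
+ //                       BaseEmitter::kOptionOptimizedForSize);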
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit BaseEmitter(uint32_t type) noexcept;
+ ASMJIT_API virtual ~BaseEmitter() noexcept;
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+ template<typename T>
+ inline T* as() noexcept { return reinterpret_cast<T*>(this); }
+
+ template<typename T>
+ inline const T* as() const noexcept { return reinterpret_cast<const T*>(this); }
+
+ //! \}
+
+ //! \name Emitter Type & Flags
+ //! \{
+
+ //! Returns the type of this emitter, see `EmitterType`.
+ inline uint32_t emitterType() const noexcept { return _type; }
+ //! Returns emitter flags, see `Flags`.
+ inline uint32_t emitterFlags() const noexcept { return _flags; }
+
+ //! Tests whether the emitter inherits from `BaseAssembler`.
+ inline bool isAssembler() const noexcept { return _type == kTypeAssembler; }
+ //! Tests whether the emitter inherits from `BaseBuilder`.
+ //!
+ //! \note Both Builder and Compiler emitters would return `true`.
+ inline bool isBuilder() const noexcept { return _type >= kTypeBuilder; }
+ //! Tests whether the emitter inherits from `BaseCompiler`.
+ inline bool isCompiler() const noexcept { return _type == kTypeCompiler; }
+
+ //! Tests whether the emitter has the given `flag` enabled.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Tests whether the emitter is finalized.
+ inline bool isFinalized() const noexcept { return hasFlag(kFlagFinalized); }
+ //! Tests whether the emitter is destroyed (only used during destruction).
+ inline bool isDestroyed() const noexcept { return hasFlag(kFlagDestroyed); }
+
+ inline void _addFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags | flags); }
+ inline void _clearFlags(uint32_t flags) noexcept { _flags = uint16_t(_flags & ~flags); }
+
+ //! \}
+
+ //! \name Target Information
+ //! \{
+
+ //! Returns the CodeHolder this emitter is attached to.
+ inline CodeHolder* code() const noexcept { return _code; }
+ //! Returns information about the code, see `CodeInfo`.
+ inline const CodeInfo& codeInfo() const noexcept { return _codeInfo; }
+ //! Returns information about the architecture, see `ArchInfo`.
+ inline const ArchInfo& archInfo() const noexcept { return _codeInfo.archInfo(); }
+
+ //! Tests whether the target architecture is 32-bit.
+ inline bool is32Bit() const noexcept { return archInfo().is32Bit(); }
+ //! Tests whether the target architecture is 64-bit.
+ inline bool is64Bit() const noexcept { return archInfo().is64Bit(); }
+
+ //! Returns the target architecture type.
+ inline uint32_t archId() const noexcept { return archInfo().archId(); }
+ //! Returns the target architecture sub-type.
+ inline uint32_t archSubId() const noexcept { return archInfo().archSubId(); }
+ //! Returns the target architecture's GP register size (4 or 8 bytes).
+ inline uint32_t gpSize() const noexcept { return archInfo().gpSize(); }
+ //! Returns the number of target GP registers.
+ inline uint32_t gpCount() const noexcept { return archInfo().gpCount(); }
+
+ //! \}
+
+ //! \name Initialization & Finalization
+ //! \{
+
+ //! Tests whether the BaseEmitter is initialized (i.e. attached to the `CodeHolder`).
+ inline bool isInitialized() const noexcept { return _code != nullptr; }
+
+ ASMJIT_API virtual Error finalize();
+
+ //! \}
+
+ //! \name Emitter Options
+ //! \{
+
+ //! Tests whether the `option` is present in emitter options.
+ inline bool hasEmitterOption(uint32_t option) const noexcept { return (_emitterOptions & option) != 0; }
+ //! Returns the emitter options.
+ inline uint32_t emitterOptions() const noexcept { return _emitterOptions; }
+
+ // TODO: Deprecate and remove, CodeHolder::addEmitterOptions() is the way.
+ inline void addEmitterOptions(uint32_t options) noexcept {
+ _emitterOptions |= options;
+ onUpdateGlobalInstOptions();
+ }
+
+ inline void clearEmitterOptions(uint32_t options) noexcept {
+ _emitterOptions &= ~options;
+ onUpdateGlobalInstOptions();
+ }
+
+ //! Returns the global instruction options.
+ //!
+ //! Default instruction options are merged with instruction options before
+ //! the instruction is encoded. These options have some bits reserved that
+ //! are used for error handling, logging, and strict validation. Other
+ //! options are globals that affect each instruction; for example, if VEX3
+ //! is set globally, it will affect all instructions, even those that don't
+ //! have such option set.
+ inline uint32_t globalInstOptions() const noexcept { return _globalInstOptions; }
+
+ //! \}
+
+ //! \name Error Handling
+ //! \{
+
+ //! Tests whether the local error handler is attached.
+ inline bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
+ //! Returns the local error handler.
+ inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
+ //! Sets the local error handler.
+ inline void setErrorHandler(ErrorHandler* handler) noexcept { _errorHandler = handler; }
+ //! Resets the local error handler (does nothing if not attached).
+ inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
+
+ //! Handles the given error in the following way:
+ //! 1. Gets either Emitter's (preferred) or CodeHolder's ErrorHandler.
+ //! 2. If exists, calls `ErrorHandler::handleError(error, message, this)`.
+ //! 3. Returns the given `err` if ErrorHandler hasn't thrown.
+ ASMJIT_API Error reportError(Error err, const char* message = nullptr);
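+
+ // A minimal sketch of routing errors through a custom handler (assuming
+ // `ErrorHandler::handleError(Error, const char*, BaseEmitter*)` as the
+ // library's error-handling interface):
+ //
+ //   class MyErrorHandler : public ErrorHandler {
+ //   public:
+ //     void handleError(Error err, const char* message, BaseEmitter* origin) override {
+ //       printf("AsmJit error: %s\n", message);
+ //     }
+ //   };
+ //
+ //   MyErrorHandler eh;
+ //   emitter->setErrorHandler(&eh);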
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+ //! Returns options of the next instruction.
+ inline uint32_t instOptions() const noexcept { return _instOptions; }
+ //! Sets options of the next instruction.
+ inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
+ //! Adds options of the next instruction.
+ inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
+ //! Resets options of the next instruction.
+ inline void resetInstOptions() noexcept { _instOptions = 0; }
+
+ //! Tests whether the extra register operand is valid.
+ inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
+ //! Returns an extra operand that will be used by the next instruction (architecture specific).
+ inline const RegOnly& extraReg() const noexcept { return _extraReg; }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
+ //! Sets an extra operand that will be used by the next instruction (architecture specific).
+ inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ //! Resets an extra operand that will be used by the next instruction (architecture specific).
+ inline void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! Returns comment/annotation of the next instruction.
+ inline const char* inlineComment() const noexcept { return _inlineComment; }
+ //! Sets comment/annotation of the next instruction.
+ //!
+ //! \note This string is set back to null by `_emit()`, but until then it
+ //! has to remain valid as the Emitter is not required to make a copy of it
+ //! (and it would be slow to do that for each instruction).
+ inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
+ //! Resets the comment/annotation to nullptr.
+ inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
+
+ //! \}
+
+ //! \name Sections
+ //! \{
+
+ virtual Error section(Section* section) = 0;
+
+ //! \}
+
+ //! \name Labels
+ //! \{
+
+ //! Creates a new label.
+ virtual Label newLabel() = 0;
+ //! Creates a new named label.
+ virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
+
+ //! Returns a `Label` by `name`.
+ //!
+ //! Returns an invalid Label if the name is invalid or the label was not found.
+ //!
+ //! \note This function doesn't trigger ErrorHandler in case the name is invalid
+ //! or no such label exists. You must always check the validity of the `Label` returned.
+ ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
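+
+ // For example (a sketch; the label name is illustrative):
+ //
+ //   Label entry = emitter->labelByName("entry");
+ //   if (!entry.isValid()) {
+ //     // No label named "entry" is registered in the attached CodeHolder.
+ //   }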
+
+ //! Binds the `label` to the current position of the current section.
+ //!
+ //! \note Attempt to bind the same label multiple times will return an error.
+ virtual Error bind(const Label& label) = 0;
+
+ //! Tests whether the label `id` is valid (i.e. registered).
+ ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
+ //! Tests whether the `label` is valid (i.e. registered).
+ inline bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ // NOTE: These `emit()` helpers are designed to address code-bloat generated
+ // by C++ compilers when calling a function having many arguments. Each
+ // parameter to `_emit()` requires some code to pass it, which means that if
+ // we defaulted to 4 operand parameters in `_emit()` plus `instId`, the C++
+ // compiler would have to generate a virtual function call having 5
+ // parameters, which is quite a lot. Since by default asm instructions have
+ // 2 to 3 operands it's better to introduce helpers that pass those and fill
+ // out the remaining operands.
+
+ #define OP const Operand_&
+ #define NONE Globals::none
+
+ //! Emits an instruction.
+ ASMJIT_NOINLINE Error emit(uint32_t instId) { return _emit(instId, NONE, NONE, NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0) { return _emit(instId, o0, NONE, NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1) { return _emit(instId, o0, o1, NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2) { return _emit(instId, o0, o1, o2, NONE); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3) { return _emit(instId, o0, o1, o2, o3); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4) { return _emit(instId, o0, o1, o2, o3, o4, NONE); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, OP o5) { return _emit(instId, o0, o1, o2, o3, o4, o5); }
+
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, int o0) { return _emit(instId, Imm(o0), NONE, NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, int o1) { return _emit(instId, o0, Imm(o1), NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, int o2) { return _emit(instId, o0, o1, Imm(o2), NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, int o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
+
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, int64_t o0) { return _emit(instId, Imm(o0), NONE, NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, int64_t o1) { return _emit(instId, o0, Imm(o1), NONE, NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, int64_t o2) { return _emit(instId, o0, o1, Imm(o2), NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, int64_t o3) { return _emit(instId, o0, o1, o2, Imm(o3)); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, int64_t o4) { return _emit(instId, o0, o1, o2, o3, Imm(o4), NONE); }
+ //! \overload
+ ASMJIT_NOINLINE Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, int64_t o5) { return _emit(instId, o0, o1, o2, o3, o4, Imm(o5)); }
+
+ //! \overload
+ inline Error emit(uint32_t instId, unsigned int o0) { return emit(instId, int64_t(o0)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, unsigned int o1) { return emit(instId, o0, int64_t(o1)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, unsigned int o2) { return emit(instId, o0, o1, int64_t(o2)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, unsigned int o3) { return emit(instId, o0, o1, o2, int64_t(o3)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, unsigned int o4) { return emit(instId, o0, o1, o2, o3, int64_t(o4)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, unsigned int o5) { return emit(instId, o0, o1, o2, o3, o4, int64_t(o5)); }
+
+ //! \overload
+ inline Error emit(uint32_t instId, uint64_t o0) { return emit(instId, int64_t(o0)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, uint64_t o1) { return emit(instId, o0, int64_t(o1)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, uint64_t o2) { return emit(instId, o0, o1, int64_t(o2)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, uint64_t o3) { return emit(instId, o0, o1, o2, int64_t(o3)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, uint64_t o4) { return emit(instId, o0, o1, o2, o3, int64_t(o4)); }
+ //! \overload
+ inline Error emit(uint32_t instId, OP o0, OP o1, OP o2, OP o3, OP o4, uint64_t o5) { return emit(instId, o0, o1, o2, o3, o4, int64_t(o5)); }
+
+ #undef NONE
+ #undef OP
+
+ inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t count) { return _emitOpArray(instId, operands, count); }
+
+ inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t count) {
+ setInstOptions(inst.options());
+ setExtraReg(inst.extraReg());
+ return _emitOpArray(inst.id(), operands, count);
+ }
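+
+ // A sketch of the typical call path (instruction ids and registers are
+ // architecture-specific; x86 identifiers are assumed here):
+ //
+ //   a.emit(x86::Inst::kIdAdd, x86::eax, x86::ebx); // Equivalent to a.add(eax, ebx).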
+
+ //! \cond INTERNAL
+ //! Emits instruction having max 4 operands.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) = 0;
+ //! Emits instruction having max 6 operands.
+ virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) = 0;
+ //! Emits instruction having operands stored in array.
+ virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t count);
+ //! \endcond
+
+ //! \}
+
+ //! \name Emit Utilities
+ //! \{
+
+ ASMJIT_API Error emitProlog(const FuncFrame& layout);
+ ASMJIT_API Error emitEpilog(const FuncFrame& layout);
+ ASMJIT_API Error emitArgsAssignment(const FuncFrame& layout, const FuncArgsAssignment& args);
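+
+ // A sketch of the typical prolog/epilog flow (`FuncDetail`, `FuncFrame`, and
+ // `FuncSignatureT` are part of this library's function API, see func.h):
+ //
+ //   FuncDetail func;
+ //   func.init(FuncSignatureT<int, int, int>(CallConv::kIdHost));
+ //
+ //   FuncFrame frame;
+ //   frame.init(func);     // Populates the frame from the calling convention.
+ //   frame.finalize();     // Computes the final stack layout.
+ //
+ //   emitter->emitProlog(frame);
+ //   // ... function body ...
+ //   emitter->emitEpilog(frame);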
+
+ //! \}
+
+ //! \name Align
+ //! \{
+
+ //! Aligns the current CodeBuffer to the `alignment` specified.
+ //!
+ //! The sequence that is used to fill the gap between the aligned location
+ //! and the current location depends on the align `mode`, see `AlignMode`.
+ virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ //! Embeds raw data into the CodeBuffer.
+ virtual Error embed(const void* data, uint32_t dataSize) = 0;
+
+ //! Embeds an absolute label address as data (4 or 8 bytes).
+ virtual Error embedLabel(const Label& label) = 0;
+
+ //! Embeds a delta (distance) between the `label` and `base` calculating it
+ //! as `label - base`. This function was designed to make it easier to embed
+ //! lookup tables where each index is a relative distance of two labels.
+ virtual Error embedLabelDelta(const Label& label, const Label& base, uint32_t dataSize) = 0;
+
+ //! Embeds a constant pool at the current offset by performing the following:
+ //! 1. Aligns the buffer by using kAlignData to the minimum `pool` alignment.
+ //! 2. Binds the ConstPool label so it's bound to an aligned location.
+ //! 3. Emits ConstPool content.
+ virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
+
+ //! \}
+
+ //! \name Comment
+ //! \{
+
+ //! Emits a comment stored in `data` with an optional `size` parameter.
+ virtual Error comment(const char* data, size_t size = SIZE_MAX) = 0;
+
+ //! Emits a formatted comment specified by `fmt` and variable number of arguments.
+ ASMJIT_API Error commentf(const char* fmt, ...);
+ //! Emits a formatted comment specified by `fmt` and `ap`.
+ ASMJIT_API Error commentv(const char* fmt, va_list ap);
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ //! Called after the emitter was attached to `CodeHolder`.
+ virtual Error onAttach(CodeHolder* code) noexcept = 0;
+ //! Called after the emitter was detached from `CodeHolder`.
+ virtual Error onDetach(CodeHolder* code) noexcept = 0;
+
+ //! Called to update `_globalInstOptions` based on `_emitterOptions`.
+ //!
+ //! This function should only touch one bit `BaseInst::kOptionReserved`, which
+ //! is used to handle errors and special-cases in a way that minimizes branching.
+ ASMJIT_API void onUpdateGlobalInstOptions() noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_EMITTER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/features.h b/3rdparty/asmjit/src/asmjit/core/features.h
new file mode 100644
index 00000000000..193841c76c8
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/features.h
@@ -0,0 +1,162 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_FEATURES_H_INCLUDED
+#define ASMJIT_CORE_FEATURES_H_INCLUDED
+
+#include "../core/globals.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::BaseFeatures]
+// ============================================================================
+
+class BaseFeatures {
+public:
+ typedef Support::BitWord BitWord;
+
+ enum : uint32_t {
+ kMaxFeatures = 128,
+ kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
+ };
+
+ BitWord _bits[kNumBitWords];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline BaseFeatures() noexcept { reset(); }
+ inline BaseFeatures(const BaseFeatures& other) noexcept = default;
+ inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
+
+ inline void reset() noexcept {
+ for (size_t i = 0; i < kNumBitWords; i++)
+ _bits[i] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
+
+ inline bool operator==(const BaseFeatures& other) const noexcept { return eq(other); }
+ inline bool operator!=(const BaseFeatures& other) const noexcept { return !eq(other); }
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+ template<typename T>
+ inline T& as() noexcept { return static_cast<T&>(*this); }
+
+ template<typename T>
+ inline const T& as() const noexcept { return static_cast<const T&>(*this); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns all features as `BitWord` array.
+ inline BitWord* bits() noexcept { return _bits; }
+ //! Returns all features as `BitWord` array (const).
+ inline const BitWord* bits() const noexcept { return _bits; }
+
+ //! Tests whether the feature `featureId` is present.
+ inline bool has(uint32_t featureId) const noexcept {
+ ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+ uint32_t idx = featureId / Support::kBitWordSizeInBits;
+ uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+ return bool((_bits[idx] >> bit) & 0x1);
+ }
+
+ //! Tests whether all features as defined by `other` are present.
+ inline bool hasAll(const BaseFeatures& other) const noexcept {
+ for (uint32_t i = 0; i < kNumBitWords; i++)
+ if ((_bits[i] & other._bits[i]) != other._bits[i])
+ return false;
+ return true;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Adds the given CPU `featureId` to the list of features.
+ inline void add(uint32_t featureId) noexcept {
+ ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+ uint32_t idx = featureId / Support::kBitWordSizeInBits;
+ uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+ _bits[idx] |= BitWord(1) << bit;
+ }
+
+ template<typename... Args>
+ inline void add(uint32_t featureId, Args... otherIds) noexcept {
+ add(featureId);
+ add(otherIds...);
+ }
+
+ //! Removes the given CPU `featureId` from the list of features.
+ inline void remove(uint32_t featureId) noexcept {
+ ASMJIT_ASSERT(featureId < kMaxFeatures);
+
+ uint32_t idx = featureId / Support::kBitWordSizeInBits;
+ uint32_t bit = featureId % Support::kBitWordSizeInBits;
+
+ _bits[idx] &= ~(BitWord(1) << bit);
+ }
+
+ template<typename... Args>
+ inline void remove(uint32_t featureId, Args... otherIds) noexcept {
+ remove(featureId);
+ remove(otherIds...);
+ }
+
+ inline bool eq(const BaseFeatures& other) const noexcept {
+ for (size_t i = 0; i < kNumBitWords; i++)
+ if (_bits[i] != other._bits[i])
+ return false;
+ return true;
+ }
+
+ //! \}
+};
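+
+// A minimal sketch (feature ids are architecture-specific; the x86 AVX2 id
+// is assumed here):
+//
+//   BaseFeatures features;
+//   features.add(x86::Features::kAVX2);
+//   bool hasAvx2 = features.has(x86::Features::kAVX2);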
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_FEATURES_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/func.cpp b/3rdparty/asmjit/src/asmjit/core/func.cpp
new file mode 100644
index 00000000000..79eab2e83ad
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/func.cpp
@@ -0,0 +1,144 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/arch.h"
+#include "../core/func.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86internal_p.h"
+ #include "../x86/x86operand.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/arminternal_p.h"
+ #include "../arm/armoperand.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::FuncDetail - Init / Reset]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& sign) {
+ uint32_t ccId = sign.callConv();
+ CallConv& cc = _callConv;
+
+ uint32_t argCount = sign.argCount();
+ if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ASMJIT_PROPAGATE(cc.init(ccId));
+
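+ // Abstract TypeIds (like `intptr`) are translated ("deabstracted") to
+ // concrete 32-bit or 64-bit TypeIds based on the target's GP register size.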
+ uint32_t gpSize = (cc.archId() == ArchInfo::kIdX86) ? 4 : 8;
+ uint32_t deabstractDelta = Type::deabstractDeltaOfSize(gpSize);
+
+ const uint8_t* args = sign.args();
+ for (uint32_t i = 0; i < argCount; i++) {
+ FuncValue& arg = _args[i];
+ arg.initTypeId(Type::deabstract(args[i], deabstractDelta));
+ }
+ _argCount = uint8_t(argCount);
+ _vaIndex = uint8_t(sign.vaIndex());
+
+ uint32_t ret = sign.ret();
+ if (ret != Type::kIdVoid) {
+ _rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
+ _retCount = 1;
+ }
+
+#ifdef ASMJIT_BUILD_X86
+ if (CallConv::isX86Family(ccId))
+ return x86::X86Internal::initFuncDetail(*this, sign, gpSize);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (CallConv::isArmFamily(ccId))
+ return arm::ArmInternal::initFuncDetail(*this, sign, gpSize);
+#endif
+
+ // We should never reach here: if `cc.init()` succeeded then there has to
+ // be an implementation for the current architecture. However, stay safe.
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncFrame - Init / Reset / Finalize]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
+ uint32_t ccId = func.callConv().id();
+
+#ifdef ASMJIT_BUILD_X86
+ if (CallConv::isX86Family(ccId))
+ return x86::X86Internal::initFuncFrame(*this, func);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (CallConv::isArmFamily(ccId))
+ return arm::ArmInternal::initFuncFrame(*this, func);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId()))
+ return x86::X86Internal::finalizeFuncFrame(*this);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId()))
+ return arm::ArmInternal::finalizeFuncFrame(*this);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+// ============================================================================
+// [asmjit::FuncArgsAssignment]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
+ const FuncDetail* func = funcDetail();
+ if (!func) return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t ccId = func->callConv().id();
+
+#ifdef ASMJIT_BUILD_X86
+ if (CallConv::isX86Family(ccId))
+ return x86::X86Internal::argsToFuncFrame(*this, frame);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (CallConv::isArmFamily(ccId))
+ return arm::ArmInternal::argsToFuncFrame(*this, frame);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/func.h b/3rdparty/asmjit/src/asmjit/core/func.h
new file mode 100644
index 00000000000..36ebf9bb526
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/func.h
@@ -0,0 +1,966 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_FUNC_H_INCLUDED
+#define ASMJIT_CORE_FUNC_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/callconv.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_func
+//! \{
+
+// ============================================================================
+// [asmjit::FuncArgIndex]
+// ============================================================================
+
+//! Function argument index (lo/hi).
+enum FuncArgIndex : uint32_t {
+ //! Maximum number of function arguments supported by AsmJit.
+ kFuncArgCount = Globals::kMaxFuncArgs,
+ //! Extended maximum number of arguments (used internally).
+ kFuncArgCountLoHi = kFuncArgCount * 2,
+
+ //! Index to the LO part of function argument (default).
+ //!
+ //! This value is typically omitted and added only if the HI part of an
+ //! argument is accessed.
+ kFuncArgLo = 0,
+
+ //! Index to the HI part of function argument.
+ //!
+ //! HI part of function argument depends on target architecture. On x86 it's
+ //! typically used to transfer 64-bit integers (they form a pair of 32-bit
+ //! integers).
+ kFuncArgHi = kFuncArgCount
+};
+
+// ============================================================================
+// [asmjit::FuncSignature]
+// ============================================================================
+
+//! Function signature.
+//!
+//! Contains information about the function return type, count of arguments,
+//! and their TypeIds. Function signature is a low-level structure which
+//! doesn't contain platform-specific or calling-convention-specific
+//! information.
+struct FuncSignature {
+ //! Calling convention id.
+ uint8_t _callConv;
+ //! Count of arguments.
+ uint8_t _argCount;
+ //! Index of the first VA argument, or `kNoVarArgs`.
+ uint8_t _vaIndex;
+ //! Return value TypeId.
+ uint8_t _ret;
+ //! Function arguments TypeIds.
+ const uint8_t* _args;
+
+ enum : uint8_t {
+ //! Doesn't have variable number of arguments (`...`).
+ kNoVarArgs = 0xFF
+ };
+
+ //! \name Initialization & Reset
+ //! \{
+
+ //! Initializes the function signature.
+ inline void init(uint32_t ccId, uint32_t vaIndex, uint32_t ret, const uint8_t* args, uint32_t argCount) noexcept {
+ ASMJIT_ASSERT(ccId <= 0xFF);
+ ASMJIT_ASSERT(argCount <= 0xFF);
+
+ _callConv = uint8_t(ccId);
+ _argCount = uint8_t(argCount);
+ _vaIndex = uint8_t(vaIndex);
+ _ret = uint8_t(ret);
+ _args = args;
+ }
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the calling convention.
+ inline uint32_t callConv() const noexcept { return _callConv; }
+ //! Sets the calling convention to `ccId`.
+ inline void setCallConv(uint32_t ccId) noexcept { _callConv = uint8_t(ccId); }
+
+ //! Tests whether the function has variable number of arguments (...).
+ inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+ //! Returns the variable arguments (...) index, `kNoVarArgs` if none.
+ inline uint32_t vaIndex() const noexcept { return _vaIndex; }
+ //! Sets the variable arguments (...) index to `index`.
+ inline void setVaIndex(uint32_t index) noexcept { _vaIndex = uint8_t(index); }
+ //! Resets the variable arguments index (making it a non-va function).
+ inline void resetVaIndex() noexcept { _vaIndex = kNoVarArgs; }
+
+ //! Returns the number of function arguments.
+ inline uint32_t argCount() const noexcept { return _argCount; }
+
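+ //! Tests whether the function has a return value (i.e. the return type is not void).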
+ inline bool hasRet() const noexcept { return _ret != Type::kIdVoid; }
+ //! Returns the return value type.
+ inline uint32_t ret() const noexcept { return _ret; }
+
+ //! Returns the type of the argument at index `i`.
+ inline uint32_t arg(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < _argCount);
+ return _args[i];
+ }
+ //! Returns the array of function arguments' types.
+ inline const uint8_t* args() const noexcept { return _args; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncSignatureT]
+// ============================================================================
+
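+//! Function signature template.
+//!
+//! Builds a `FuncSignature` from the types given as template parameters,
+//! where the first type is the return type and the remaining types are
+//! argument types.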
+template<typename... RET_ARGS>
+class FuncSignatureT : public FuncSignature {
+public:
+ inline FuncSignatureT(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept {
+ static const uint8_t ret_args[] = { (uint8_t(Type::IdOfT<RET_ARGS>::kTypeId))... };
+ init(ccId, vaIndex, ret_args[0], ret_args + 1, uint32_t(ASMJIT_ARRAY_SIZE(ret_args) - 1));
+ }
+};
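+
+// For example, the following two signatures describe the same `int fn(int, int)`
+// function (a sketch; `Type::kIdI32` is assumed to be this library's 32-bit
+// signed integer TypeId):
+//
+//   FuncSignatureT<int, int, int> sig1(CallConv::kIdHost);
+//
+//   uint8_t args[] = { uint8_t(Type::kIdI32), uint8_t(Type::kIdI32) };
+//   FuncSignature sig2;
+//   sig2.init(CallConv::kIdHost, FuncSignature::kNoVarArgs, Type::kIdI32, args, 2);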
+
+// ============================================================================
+// [asmjit::FuncSignatureBuilder]
+// ============================================================================
+
+//! Function signature builder.
+class FuncSignatureBuilder : public FuncSignature {
+public:
+ uint8_t _builderArgList[kFuncArgCount];
+
+ //! \name Initialization & Reset
+ //! \{
+
+ inline FuncSignatureBuilder(uint32_t ccId = CallConv::kIdHost, uint32_t vaIndex = kNoVarArgs) noexcept {
+ init(ccId, vaIndex, Type::kIdVoid, _builderArgList, 0);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Sets the return type to `retType`.
+ inline void setRet(uint32_t retType) noexcept { _ret = uint8_t(retType); }
+ //! Sets the return type based on `T`.
+ template<typename T>
+ inline void setRetT() noexcept { setRet(Type::IdOfT<T>::kTypeId); }
+
+ //! Sets the argument at index `index` to `argType`.
+ inline void setArg(uint32_t index, uint32_t argType) noexcept {
+ ASMJIT_ASSERT(index < _argCount);
+ _builderArgList[index] = uint8_t(argType);
+ }
+ //! Sets the argument at index `i` to the type based on `T`.
+ template<typename T>
+ inline void setArgT(uint32_t index) noexcept { setArg(index, Type::IdOfT<T>::kTypeId); }
+
+ //! Appends an argument of `type` to the function prototype.
+ inline void addArg(uint32_t type) noexcept {
+ ASMJIT_ASSERT(_argCount < kFuncArgCount);
+ _builderArgList[_argCount++] = uint8_t(type);
+ }
+ //! Appends an argument of type based on `T` to the function prototype.
+ template<typename T>
+ inline void addArgT() noexcept { addArg(Type::IdOfT<T>::kTypeId); }
+
+ //! \}
+};
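+
+// For example, building `double fn(int, double)` incrementally:
+//
+//   FuncSignatureBuilder builder(CallConv::kIdHost);
+//   builder.setRetT<double>();
+//   builder.addArgT<int>();
+//   builder.addArgT<double>();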
+
+// ============================================================================
+// [asmjit::FuncValue]
+// ============================================================================
+
+//! Argument or return value as defined by `FuncSignature`, but with register
+//! or stack address (and other metadata) assigned to it.
+struct FuncValue {
+ uint32_t _data;
+
+ enum Parts : uint32_t {
+ kTypeIdShift = 0, //!< TypeId shift.
+ kTypeIdMask = 0x000000FFu, //!< TypeId mask.
+
+ kFlagIsReg = 0x00000100u, //!< Passed by register.
+ kFlagIsStack = 0x00000200u, //!< Passed by stack.
+ kFlagIsIndirect = 0x00000400u, //!< Passed indirectly by reference (internally a pointer).
+ kFlagIsDone = 0x00000800u, //!< Used internally by arguments allocator.
+
+ kStackOffsetShift = 12, //!< Stack offset shift.
+ kStackOffsetMask = 0xFFFFF000u, //!< Stack offset mask (must occupy MSB bits).
+
+ kRegIdShift = 16, //!< RegId shift.
+ kRegIdMask = 0x00FF0000u, //!< RegId mask.
+
+ kRegTypeShift = 24, //!< RegType shift.
+ kRegTypeMask = 0xFF000000u //!< RegType mask.
+ };
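+
+ // For example, `initReg(regType, regId, typeId)` below packs its inputs as
+ // `(regType << 24) | (regId << 16) | typeId | kFlagIsReg`, so the accessors
+ // that follow are simple masked reads of `_data`.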
+
+ //! \name Initialization & Reset
+ //! \{
+
+ // These initialize the whole `FuncValue` to either register or stack. Useful
+ // when you know all of these properties and want to just set it up.
+
+ //! Initializes the `typeId` of this `FuncValue`.
+ inline void initTypeId(uint32_t typeId) noexcept {
+ _data = typeId << kTypeIdShift;
+ }
+
+ inline void initReg(uint32_t regType, uint32_t regId, uint32_t typeId, uint32_t flags = 0) noexcept {
+ _data = (regType << kRegTypeShift) | (regId << kRegIdShift) | (typeId << kTypeIdShift) | kFlagIsReg | flags;
+ }
+
+ inline void initStack(int32_t offset, uint32_t typeId) noexcept {
+ _data = (uint32_t(offset) << kStackOffsetShift) | (typeId << kTypeIdShift) | kFlagIsStack;
+ }
+
+ //! Resets the value to its unassigned state.
+ inline void reset() noexcept { _data = 0; }
+
+ //! \}
+
+ //! \name Assign
+ //! \{
+
+ // These initialize only part of `FuncValue`, which is useful when building
+ // `FuncValue` incrementally. The caller should first init the type-id by
+ // calling `initTypeId` and then continue building either register or stack.
+
+ inline void assignRegData(uint32_t regType, uint32_t regId) noexcept {
+ ASMJIT_ASSERT((_data & (kRegTypeMask | kRegIdMask)) == 0);
+ _data |= (regType << kRegTypeShift) | (regId << kRegIdShift) | kFlagIsReg;
+ }
+
+ inline void assignStackOffset(int32_t offset) noexcept {
+ ASMJIT_ASSERT((_data & kStackOffsetMask) == 0);
+ _data |= (uint32_t(offset) << kStackOffsetShift) | kFlagIsStack;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline void _replaceValue(uint32_t mask, uint32_t value) noexcept { _data = (_data & ~mask) | value; }
+
+ //! Tests whether the `FuncValue` has a flag `flag` set.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_data & flag) != 0; }
+ //! Adds `flags` to `FuncValue`.
+ inline void addFlags(uint32_t flags) noexcept { _data |= flags; }
+ //! Clears `flags` of `FuncValue`.
+ inline void clearFlags(uint32_t flags) noexcept { _data &= ~flags; }
+
+ //! Tests whether the value is initialized (i.e. contains a valid data).
+ inline bool isInitialized() const noexcept { return _data != 0; }
+ //! Tests whether the argument is passed by register.
+ inline bool isReg() const noexcept { return hasFlag(kFlagIsReg); }
+ //! Tests whether the argument is passed by stack.
+ inline bool isStack() const noexcept { return hasFlag(kFlagIsStack); }
+ //! Tests whether the argument is assigned, i.e. passed by either register or stack.
+ inline bool isAssigned() const noexcept { return hasFlag(kFlagIsReg | kFlagIsStack); }
+ //! Tests whether the argument is passed through a pointer (used by WIN64 to pass XMM|YMM|ZMM).
+ inline bool isIndirect() const noexcept { return hasFlag(kFlagIsIndirect); }
+
+ //! Tests whether the argument was already processed (used internally).
+ inline bool isDone() const noexcept { return hasFlag(kFlagIsDone); }
+
+ //! Returns a register type of the register used to pass function argument or return value.
+ inline uint32_t regType() const noexcept { return (_data & kRegTypeMask) >> kRegTypeShift; }
+ //! Sets a register type of the register used to pass function argument or return value.
+ inline void setRegType(uint32_t regType) noexcept { _replaceValue(kRegTypeMask, regType << kRegTypeShift); }
+
+ //! Returns a physical id of the register used to pass function argument or return value.
+ inline uint32_t regId() const noexcept { return (_data & kRegIdMask) >> kRegIdShift; }
+ //! Sets a physical id of the register used to pass function argument or return value.
+ inline void setRegId(uint32_t regId) noexcept { _replaceValue(kRegIdMask, regId << kRegIdShift); }
+
+ //! Returns a stack offset of this argument.
+ inline int32_t stackOffset() const noexcept { return int32_t(_data & kStackOffsetMask) >> kStackOffsetShift; }
+ //! Sets a stack offset of this argument.
+ inline void setStackOffset(int32_t offset) noexcept { _replaceValue(kStackOffsetMask, uint32_t(offset) << kStackOffsetShift); }
+
+ //! Tests whether the argument or return value has associated `Type::Id`.
+ inline bool hasTypeId() const noexcept { return (_data & kTypeIdMask) != 0; }
+ //! Returns a TypeId of this argument or return value.
+ inline uint32_t typeId() const noexcept { return (_data & kTypeIdMask) >> kTypeIdShift; }
+ //! Sets a TypeId of this argument or return value.
+ inline void setTypeId(uint32_t typeId) noexcept { _replaceValue(kTypeIdMask, typeId << kTypeIdShift); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncDetail]
+// ============================================================================
+
+//! Function detail - CallConv and expanded FuncSignature.
+//!
+//! Function detail is an architecture and OS dependent representation of a
+//! function. It contains the calling convention and an expanded function
+//! signature so all arguments have assigned either a register type & id or
+//! a stack address.
+class FuncDetail {
+public:
+ //! Calling convention.
+ CallConv _callConv;
+ //! Number of function arguments.
+ uint8_t _argCount;
+ //! Number of function return values.
+ uint8_t _retCount;
+ //! Variable arguments index, or `kNoVarArgs`.
+ uint8_t _vaIndex;
+ //! Reserved for future use.
+ uint8_t _reserved;
+ //! Registers that contain function arguments.
+ uint32_t _usedRegs[BaseReg::kGroupVirt];
+ //! Size of arguments passed by stack.
+ uint32_t _argStackSize;
+ //! Function return values.
+ FuncValue _rets[2];
+ //! Function arguments.
+ FuncValue _args[kFuncArgCountLoHi];
+
+ enum : uint8_t {
+ //! Doesn't have variable number of arguments (`...`).
+ kNoVarArgs = 0xFF
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline FuncDetail() noexcept { reset(); }
+ inline FuncDetail(const FuncDetail& other) noexcept = default;
+
+ //! Initializes this `FuncDetail` to the given signature.
+ ASMJIT_API Error init(const FuncSignature& sign);
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the function's calling convention, see `CallConv`.
+ inline const CallConv& callConv() const noexcept { return _callConv; }
+
+ //! Returns the associated calling convention flags, see `CallConv::Flags`.
+ inline uint32_t flags() const noexcept { return _callConv.flags(); }
+ //! Checks whether a CallConv `flag` is set, see `CallConv::Flags`.
+ inline bool hasFlag(uint32_t ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }
+
+ //! Returns count of function return values.
+ inline uint32_t retCount() const noexcept { return _retCount; }
+ //! Returns the number of function arguments.
+ inline uint32_t argCount() const noexcept { return _argCount; }
+
+ //! Tests whether the function has a return value.
+ inline bool hasRet() const noexcept { return _retCount != 0; }
+ //! Returns function return value associated with the given `index`.
+ inline FuncValue& ret(uint32_t index = 0) noexcept {
+ ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+ return _rets[index];
+ }
+ //! Returns function return value associated with the given `index` (const).
+ inline const FuncValue& ret(uint32_t index = 0) const noexcept {
+ ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
+ return _rets[index];
+ }
+
+ //! Returns function arguments array.
+ inline FuncValue* args() noexcept { return _args; }
+ //! Returns function arguments array (const).
+ inline const FuncValue* args() const noexcept { return _args; }
+
+ inline bool hasArg(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+ return _args[index].isInitialized();
+ }
+
+ //! Returns function argument at the given `index`.
+ inline FuncValue& arg(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+ return _args[index];
+ }
+
+  //! Returns function argument at the given `index` (const).
+ inline const FuncValue& arg(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+ return _args[index];
+ }
+
+ inline void resetArg(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < kFuncArgCountLoHi);
+ _args[index].reset();
+ }
+
+ inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
+ inline uint32_t vaIndex() const noexcept { return _vaIndex; }
+
+  //! Tests whether the function passes one or more arguments by stack.
+ inline bool hasStackArgs() const noexcept { return _argStackSize != 0; }
+ //! Returns stack size needed for function arguments passed on the stack.
+ inline uint32_t argStackSize() const noexcept { return _argStackSize; }
+
+ inline uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); }
+ inline uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); }
+ inline uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); }
+
+ inline uint32_t passedRegs(uint32_t group) const noexcept { return _callConv.passedRegs(group); }
+ inline uint32_t preservedRegs(uint32_t group) const noexcept { return _callConv.preservedRegs(group); }
+
+ inline uint32_t usedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _usedRegs[group];
+ }
+
+ inline void addUsedRegs(uint32_t group, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _usedRegs[group] |= regs;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncFrame]
+// ============================================================================
+
+//! Function frame.
+//!
+//! Function frame is used directly by prolog and epilog insertion (PEI) utils.
+//! It provides information necessary to insert a proper and ABI conforming
+//! prolog and epilog. Function frame calculation is based on `CallConv` and
+//! other function attributes.
+//!
+//! Function Frame Structure
+//! ------------------------
+//!
+//! Various properties can contribute to the size and structure of the function
+//! frame. The function frame in most cases won't use all of the properties
+//! illustrated (for example Spill Zone and Red Zone are never used together).
+//!
+//! +-----------------------------+
+//! | Arguments Passed by Stack |
+//! +-----------------------------+
+//! | Spill Zone |
+//! +-----------------------------+ <- Stack offset (args) starts from here.
+//! | Return Address if Pushed |
+//! +-----------------------------+ <- Stack pointer (SP) upon entry.
+//! | Save/Restore Stack. |
+//! +-----------------------------+-----------------------------+
+//! | Local Stack | |
+//! +-----------------------------+ Final Stack |
+//! | Call Stack | |
+//! +-----------------------------+-----------------------------+ <- SP after prolog.
+//! | Red Zone |
+//! +-----------------------------+
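+//!
+//! Typical Workflow
+//! ----------------
+//!
+//! A sketch of the typical flow (here `detail` is an initialized `FuncDetail`
+//! and `reg` stands for any architecture register the function modifies,
+//! e.g. an x86 GP register):
+//!
+//! ```
+//! FuncFrame frame;
+//! frame.init(detail);      // Copy relevant data from `FuncDetail`.
+//! frame.addDirtyRegs(reg); // Mark every register the function clobbers.
+//! frame.finalize();        // Calculate final stack size and offsets.
+//! ```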
+class FuncFrame {
+public:
+ enum Tag : uint32_t {
+ kTagInvalidOffset = 0xFFFFFFFFu //!< Tag used to inform that some offset is invalid.
+ };
+
+  //! Attributes are designed in a way that all are initially false, and the
+  //! user or the FuncFrame finalizer adds them when necessary.
+ enum Attributes : uint32_t {
+ kAttrHasVarArgs = 0x00000001u, //!< Function has variable number of arguments.
+ kAttrHasPreservedFP = 0x00000010u, //!< Preserve frame pointer (don't omit FP).
+ kAttrHasFuncCalls = 0x00000020u, //!< Function calls other functions (is not leaf).
+
+ kAttrX86AvxEnabled = 0x00010000u, //!< Use AVX instead of SSE for all operations (X86).
+ kAttrX86AvxCleanup = 0x00020000u, //!< Emit VZEROUPPER instruction in epilog (X86).
+ kAttrX86MmxCleanup = 0x00040000u, //!< Emit EMMS instruction in epilog (X86).
+
+ kAttrAlignedVecSR = 0x40000000u, //!< Function has aligned save/restore of vector registers.
+ kAttrIsFinalized = 0x80000000u //!< FuncFrame is finalized and can be used by PEI.
+ };
+
+ //! Function attributes.
+ uint32_t _attributes;
+
+ //! Architecture ID.
+ uint8_t _archId;
+ //! SP register ID (to access call stack and local stack).
+ uint8_t _spRegId;
+ //! SA register ID (to access stack arguments).
+ uint8_t _saRegId;
+
+ //! Red zone size (copied from CallConv).
+ uint8_t _redZoneSize;
+ //! Spill zone size (copied from CallConv).
+ uint8_t _spillZoneSize;
+ //! Natural stack alignment (copied from CallConv).
+ uint8_t _naturalStackAlignment;
+ //! Minimum stack alignment to turn on dynamic alignment.
+ uint8_t _minDynamicAlignment;
+
+ //! Call stack alignment.
+ uint8_t _callStackAlignment;
+ //! Local stack alignment.
+ uint8_t _localStackAlignment;
+ //! Final stack alignment.
+ uint8_t _finalStackAlignment;
+
+ //! Adjustment of the stack before returning (X86-STDCALL).
+ uint16_t _calleeStackCleanup;
+
+ //! Call stack size.
+ uint32_t _callStackSize;
+ //! Local stack size.
+ uint32_t _localStackSize;
+ //! Final stack size (sum of call stack and local stack).
+ uint32_t _finalStackSize;
+
+ //! Local stack offset (non-zero only if call stack is used).
+ uint32_t _localStackOffset;
+ //! Offset relative to SP that contains previous SP (before alignment).
+ uint32_t _daOffset;
+ //! Offset of the first stack argument relative to SP.
+ uint32_t _saOffsetFromSP;
+ //! Offset of the first stack argument relative to SA (_saRegId or FP).
+ uint32_t _saOffsetFromSA;
+
+ //! Local stack adjustment in prolog/epilog.
+ uint32_t _stackAdjustment;
+
+ //! Registers that are dirty.
+ uint32_t _dirtyRegs[BaseReg::kGroupVirt];
+ //! Registers that must be preserved (copied from CallConv).
+ uint32_t _preservedRegs[BaseReg::kGroupVirt];
+
+ //! Final stack size required to save GP regs.
+ uint16_t _gpSaveSize;
+  //! Final stack size required to save regs other than GP.
+ uint16_t _nonGpSaveSize;
+ //! Final offset where saved GP regs are stored.
+ uint32_t _gpSaveOffset;
+  //! Final offset where saved regs other than GP are stored.
+ uint32_t _nonGpSaveOffset;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline FuncFrame() noexcept { reset(); }
+ inline FuncFrame(const FuncFrame& other) noexcept = default;
+
+ ASMJIT_API Error init(const FuncDetail& func) noexcept;
+
+ inline void reset() noexcept {
+ memset(this, 0, sizeof(FuncFrame));
+ _spRegId = BaseReg::kIdBad;
+ _saRegId = BaseReg::kIdBad;
+ _daOffset = kTagInvalidOffset;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the target architecture of the function frame.
+ inline uint32_t archId() const noexcept { return _archId; }
+
+ //! Returns function frame attributes, see `Attributes`.
+ inline uint32_t attributes() const noexcept { return _attributes; }
+  //! Checks whether the FuncFrame contains an attribute `attr`.
+ inline bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; }
+ //! Adds attributes `attrs` to the FuncFrame.
+ inline void addAttributes(uint32_t attrs) noexcept { _attributes |= attrs; }
+  //! Clears attributes `attrs` from the FuncFrame.
+ inline void clearAttributes(uint32_t attrs) noexcept { _attributes &= ~attrs; }
+
+ //! Tests whether the function has variable number of arguments.
+ inline bool hasVarArgs() const noexcept { return hasAttribute(kAttrHasVarArgs); }
+ //! Sets the variable arguments flag.
+ inline void setVarArgs() noexcept { addAttributes(kAttrHasVarArgs); }
+ //! Resets variable arguments flag.
+ inline void resetVarArgs() noexcept { clearAttributes(kAttrHasVarArgs); }
+
+  //! Tests whether the function preserves frame pointer (EBP|RBP on X86).
+ inline bool hasPreservedFP() const noexcept { return hasAttribute(kAttrHasPreservedFP); }
+ //! Enables preserved frame pointer.
+ inline void setPreservedFP() noexcept { addAttributes(kAttrHasPreservedFP); }
+ //! Disables preserved frame pointer.
+ inline void resetPreservedFP() noexcept { clearAttributes(kAttrHasPreservedFP); }
+
+ //! Tests whether the function calls other functions.
+ inline bool hasFuncCalls() const noexcept { return hasAttribute(kAttrHasFuncCalls); }
+  //! Adds the `kAttrHasFuncCalls` attribute.
+  inline void setFuncCalls() noexcept { addAttributes(kAttrHasFuncCalls); }
+  //! Clears the `kAttrHasFuncCalls` attribute.
+  inline void resetFuncCalls() noexcept { clearAttributes(kAttrHasFuncCalls); }
+
+ //! Tests whether the function contains AVX cleanup - 'vzeroupper' instruction in epilog.
+ inline bool hasAvxCleanup() const noexcept { return hasAttribute(kAttrX86AvxCleanup); }
+ //! Enables AVX cleanup.
+ inline void setAvxCleanup() noexcept { addAttributes(kAttrX86AvxCleanup); }
+ //! Disables AVX cleanup.
+ inline void resetAvxCleanup() noexcept { clearAttributes(kAttrX86AvxCleanup); }
+
+  //! Tests whether the function uses AVX instead of SSE for all SIMD operations (X86).
+  inline bool isAvxEnabled() const noexcept { return hasAttribute(kAttrX86AvxEnabled); }
+  //! Enables AVX use.
+  inline void setAvxEnabled() noexcept { addAttributes(kAttrX86AvxEnabled); }
+  //! Disables AVX use.
+  inline void resetAvxEnabled() noexcept { clearAttributes(kAttrX86AvxEnabled); }
+
+ //! Tests whether the function contains MMX cleanup - 'emms' instruction in epilog.
+ inline bool hasMmxCleanup() const noexcept { return hasAttribute(kAttrX86MmxCleanup); }
+ //! Enables MMX cleanup.
+ inline void setMmxCleanup() noexcept { addAttributes(kAttrX86MmxCleanup); }
+ //! Disables MMX cleanup.
+ inline void resetMmxCleanup() noexcept { clearAttributes(kAttrX86MmxCleanup); }
+
+ //! Tests whether the function uses call stack.
+ inline bool hasCallStack() const noexcept { return _callStackSize != 0; }
+ //! Tests whether the function uses local stack.
+ inline bool hasLocalStack() const noexcept { return _localStackSize != 0; }
+ //! Tests whether vector registers can be saved and restored by using aligned reads and writes.
+ inline bool hasAlignedVecSR() const noexcept { return hasAttribute(kAttrAlignedVecSR); }
+ //! Tests whether the function has to align stack dynamically.
+ inline bool hasDynamicAlignment() const noexcept { return _finalStackAlignment >= _minDynamicAlignment; }
+
+ //! Tests whether the calling convention specifies 'RedZone'.
+ inline bool hasRedZone() const noexcept { return _redZoneSize != 0; }
+ //! Tests whether the calling convention specifies 'SpillZone'.
+ inline bool hasSpillZone() const noexcept { return _spillZoneSize != 0; }
+
+ //! Returns the size of 'RedZone'.
+ inline uint32_t redZoneSize() const noexcept { return _redZoneSize; }
+ //! Returns the size of 'SpillZone'.
+ inline uint32_t spillZoneSize() const noexcept { return _spillZoneSize; }
+ //! Returns natural stack alignment (guaranteed stack alignment upon entry).
+ inline uint32_t naturalStackAlignment() const noexcept { return _naturalStackAlignment; }
+  //! Returns the minimum stack alignment required to turn on dynamic stack alignment.
+ inline uint32_t minDynamicAlignment() const noexcept { return _minDynamicAlignment; }
+
+  //! Tests whether the callee must adjust SP before returning (X86-STDCALL only).
+  inline bool hasCalleeStackCleanup() const noexcept { return _calleeStackCleanup != 0; }
+  //! Returns how many bytes of the stack the callee must adjust before returning (X86-STDCALL only).
+ inline uint32_t calleeStackCleanup() const noexcept { return _calleeStackCleanup; }
+
+ //! Returns call stack alignment.
+ inline uint32_t callStackAlignment() const noexcept { return _callStackAlignment; }
+ //! Returns local stack alignment.
+ inline uint32_t localStackAlignment() const noexcept { return _localStackAlignment; }
+ //! Returns final stack alignment (the maximum value of call, local, and natural stack alignments).
+ inline uint32_t finalStackAlignment() const noexcept { return _finalStackAlignment; }
+
+ //! Sets call stack alignment.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void setCallStackAlignment(uint32_t alignment) noexcept {
+ _callStackAlignment = uint8_t(alignment);
+ _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+ }
+
+ //! Sets local stack alignment.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void setLocalStackAlignment(uint32_t value) noexcept {
+ _localStackAlignment = uint8_t(value);
+ _finalStackAlignment = Support::max(_naturalStackAlignment, _callStackAlignment, _localStackAlignment);
+ }
+
+ //! Combines call stack alignment with `alignment`, updating it to the greater value.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void updateCallStackAlignment(uint32_t alignment) noexcept {
+ _callStackAlignment = uint8_t(Support::max<uint32_t>(_callStackAlignment, alignment));
+ _finalStackAlignment = Support::max(_finalStackAlignment, _callStackAlignment);
+ }
+
+ //! Combines local stack alignment with `alignment`, updating it to the greater value.
+ //!
+ //! \note This also updates the final stack alignment.
+ inline void updateLocalStackAlignment(uint32_t alignment) noexcept {
+ _localStackAlignment = uint8_t(Support::max<uint32_t>(_localStackAlignment, alignment));
+ _finalStackAlignment = Support::max(_finalStackAlignment, _localStackAlignment);
+ }
+
+ //! Returns call stack size.
+ inline uint32_t callStackSize() const noexcept { return _callStackSize; }
+ //! Returns local stack size.
+ inline uint32_t localStackSize() const noexcept { return _localStackSize; }
+
+ //! Sets call stack size.
+ inline void setCallStackSize(uint32_t size) noexcept { _callStackSize = size; }
+ //! Sets local stack size.
+ inline void setLocalStackSize(uint32_t size) noexcept { _localStackSize = size; }
+
+ //! Combines call stack size with `size`, updating it to the greater value.
+ inline void updateCallStackSize(uint32_t size) noexcept { _callStackSize = Support::max(_callStackSize, size); }
+ //! Combines local stack size with `size`, updating it to the greater value.
+ inline void updateLocalStackSize(uint32_t size) noexcept { _localStackSize = Support::max(_localStackSize, size); }
+
+ //! Returns final stack size (only valid after the FuncFrame is finalized).
+ inline uint32_t finalStackSize() const noexcept { return _finalStackSize; }
+
+ //! Returns an offset to access the local stack (non-zero only if call stack is used).
+ inline uint32_t localStackOffset() const noexcept { return _localStackOffset; }
+
+ //! Tests whether the function prolog/epilog requires a memory slot for storing unaligned SP.
+ inline bool hasDAOffset() const noexcept { return _daOffset != kTagInvalidOffset; }
+ //! Returns a memory offset used to store DA (dynamic alignment) slot (relative to SP).
+ inline uint32_t daOffset() const noexcept { return _daOffset; }
+
+ inline uint32_t saOffset(uint32_t regId) const noexcept {
+ return regId == _spRegId ? saOffsetFromSP()
+ : saOffsetFromSA();
+ }
+
+ inline uint32_t saOffsetFromSP() const noexcept { return _saOffsetFromSP; }
+ inline uint32_t saOffsetFromSA() const noexcept { return _saOffsetFromSA; }
+
+ //! Returns mask of registers of the given register `group` that are modified
+ //! by the function. The engine would then calculate which registers must be
+ //! saved & restored by the function by using the data provided by the calling
+ //! convention.
+ inline uint32_t dirtyRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _dirtyRegs[group];
+ }
+
+ //! Sets which registers (as a mask) are modified by the function.
+ //!
+ //! \remarks Please note that this will completely overwrite the existing
+  //! register mask; use `addDirtyRegs()` to modify the existing register
+  //! mask instead.
+ inline void setDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _dirtyRegs[group] = regs;
+ }
+
+ //! Adds which registers (as a mask) are modified by the function.
+ inline void addDirtyRegs(uint32_t group, uint32_t regs) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _dirtyRegs[group] |= regs;
+ }
+
+ //! \overload
+ inline void addDirtyRegs(const BaseReg& reg) noexcept {
+ ASMJIT_ASSERT(reg.id() < Globals::kMaxPhysRegs);
+ addDirtyRegs(reg.group(), Support::bitMask(reg.id()));
+ }
+
+ //! \overload
+ template<typename... Args>
+ ASMJIT_INLINE void addDirtyRegs(const BaseReg& reg, Args&&... args) noexcept {
+ addDirtyRegs(reg);
+ addDirtyRegs(std::forward<Args>(args)...);
+ }
+
+ inline void setAllDirty() noexcept {
+ _dirtyRegs[0] = 0xFFFFFFFFu;
+ _dirtyRegs[1] = 0xFFFFFFFFu;
+ _dirtyRegs[2] = 0xFFFFFFFFu;
+ _dirtyRegs[3] = 0xFFFFFFFFu;
+ }
+
+ inline void setAllDirty(uint32_t group) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ _dirtyRegs[group] = 0xFFFFFFFFu;
+ }
+
+ //! Returns a calculated mask of registers of the given `group` that will be
+ //! saved and restored in the function's prolog and epilog, respectively. The
+ //! register mask is calculated from both `dirtyRegs` (provided by user) and
+ //! `preservedMask` (provided by the calling convention).
+ inline uint32_t savedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _dirtyRegs[group] & _preservedRegs[group];
+ }
+
+ //! Returns the mask of preserved registers of the given register `group`.
+ //!
+ //! Preserved registers are those that must survive the function call
+  //! unmodified. The function can only modify preserved registers if they
+  //! are saved and restored in the function's prolog and epilog, respectively.
+ inline uint32_t preservedRegs(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _preservedRegs[group];
+ }
+
+ inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
+ inline uint32_t saRegId() const noexcept { return _saRegId; }
+  inline void setSARegId(uint32_t regId) noexcept { _saRegId = uint8_t(regId); }
+  inline void resetSARegId() noexcept { setSARegId(BaseReg::kIdBad); }
+
+ //! Returns stack size required to save GP registers.
+ inline uint32_t gpSaveSize() const noexcept { return _gpSaveSize; }
+  //! Returns stack size required to save registers other than GP (MM, XMM|YMM|ZMM, K, VFP, etc...).
+ inline uint32_t nonGpSaveSize() const noexcept { return _nonGpSaveSize; }
+
+ //! Returns an offset to the stack where general purpose registers are saved.
+ inline uint32_t gpSaveOffset() const noexcept { return _gpSaveOffset; }
+  //! Returns an offset to the stack where registers other than GP are saved.
+ inline uint32_t nonGpSaveOffset() const noexcept { return _nonGpSaveOffset; }
+
+  //! Tests whether the function contains a stack adjustment.
+ inline bool hasStackAdjustment() const noexcept { return _stackAdjustment != 0; }
+ //! Returns function's stack adjustment used in function's prolog and epilog.
+ //!
+ //! If the returned value is zero it means that the stack is not adjusted.
+  //! This can mean that the stack is not used at all, or that it is only
+  //! adjusted by instructions that push/pop registers to/from the stack.
+ inline uint32_t stackAdjustment() const noexcept { return _stackAdjustment; }
+
+ //! \}
+
+  //! \name Finalization
+ //! \{
+
+ ASMJIT_API Error finalize() noexcept;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FuncArgsAssignment]
+// ============================================================================
+
+//! A helper class that can be used to assign a physical register for each
+//! function argument. Use with `BaseEmitter::emitArgsAssignment()`.
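+//!
+//! A sketch of intended use (assuming an initialized `FuncDetail detail`, a
+//! `FuncFrame frame` that is not yet finalized, and two architecture registers
+//! `a` and `b`, e.g. x86 GP registers):
+//!
+//! ```
+//! FuncArgsAssignment args(&detail);
+//! args.assignAll(a, b);        // Assign registers to arguments 0 and 1.
+//! args.updateFuncFrame(frame); // Required before `emitArgsAssignment()`.
+//! ```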
+class FuncArgsAssignment {
+public:
+ //! Function detail.
+ const FuncDetail* _funcDetail;
+ //! Register that can be used to access arguments passed by stack.
+ uint8_t _saRegId;
+ //! Reserved for future use.
+ uint8_t _reserved[3];
+ //! Mapping of each function argument.
+ FuncValue _args[kFuncArgCountLoHi];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline explicit FuncArgsAssignment(const FuncDetail* fd = nullptr) noexcept { reset(fd); }
+
+ inline FuncArgsAssignment(const FuncArgsAssignment& other) noexcept {
+ memcpy(this, &other, sizeof(*this));
+ }
+
+ inline void reset(const FuncDetail* fd = nullptr) noexcept {
+ _funcDetail = fd;
+ _saRegId = uint8_t(BaseReg::kIdBad);
+ memset(_reserved, 0, sizeof(_reserved));
+ memset(_args, 0, sizeof(_args));
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline const FuncDetail* funcDetail() const noexcept { return _funcDetail; }
+ inline void setFuncDetail(const FuncDetail* fd) noexcept { _funcDetail = fd; }
+
+ inline bool hasSARegId() const noexcept { return _saRegId != BaseReg::kIdBad; }
+ inline uint32_t saRegId() const noexcept { return _saRegId; }
+  inline void setSARegId(uint32_t regId) noexcept { _saRegId = uint8_t(regId); }
+  inline void resetSARegId() noexcept { _saRegId = uint8_t(BaseReg::kIdBad); }
+
+ inline FuncValue& arg(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
+ return _args[index];
+ }
+ inline const FuncValue& arg(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
+ return _args[index];
+ }
+
+ inline bool isAssigned(uint32_t argIndex) const noexcept {
+ ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+ return _args[argIndex].isAssigned();
+ }
+
+ inline void assignReg(uint32_t argIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
+ ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+ ASMJIT_ASSERT(reg.isPhysReg());
+ _args[argIndex].initReg(reg.type(), reg.id(), typeId);
+ }
+
+ inline void assignReg(uint32_t argIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
+ ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+ _args[argIndex].initReg(regType, regId, typeId);
+ }
+
+  inline void assignStack(uint32_t argIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) noexcept {
+ ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+ _args[argIndex].initStack(offset, typeId);
+ }
+
+  // NOTE: All `assignAll()` methods are shortcuts to assign all arguments at
+  // once. However, since registers are passed all at once, these initializers
+  // don't provide any way to pass a TypeId or to leave any argument in
+  // between unassigned.
+ inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg) noexcept {
+ assignReg(argIndex, reg);
+ }
+
+ template<typename... Args>
+ inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg, Args&&... args) noexcept {
+ assignReg(argIndex, reg);
+ _assignAllInternal(argIndex + 1, std::forward<Args>(args)...);
+ }
+
+ template<typename... Args>
+ inline void assignAll(Args&&... args) noexcept {
+ _assignAllInternal(0, std::forward<Args>(args)...);
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Update `FuncFrame` based on function's arguments assignment.
+ //!
+  //! \note You MUST call this in order to use `BaseEmitter::emitArgsAssignment()`;
+  //! otherwise the FuncFrame would not contain the information necessary to
+  //! assign all arguments to the registers and/or stack locations specified.
+ ASMJIT_API Error updateFuncFrame(FuncFrame& frame) const noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_FUNC_H_INCLUDED
+
diff --git a/3rdparty/asmjit/src/asmjit/core/globals.cpp b/3rdparty/asmjit/src/asmjit/core/globals.cpp
new file mode 100644
index 00000000000..426fce8f533
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/globals.cpp
@@ -0,0 +1,131 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/globals.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::DebugUtils]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
+#ifndef ASMJIT_NO_TEXT
+ static const char errorMessages[] =
+ "Ok\0"
+ "Out of memory\0"
+ "Invalid argument\0"
+ "Invalid state\0"
+ "Invalid architecture\0"
+ "Not initialized\0"
+ "Already initialized\0"
+ "Feature not enabled\0"
+ "Too many handles or file descriptors\0"
+ "Too large (code or memory request)\0"
+ "No code generated\0"
+ "Invalid directive\0"
+ "Invalid label\0"
+ "Too many labels\0"
+ "Label already bound\0"
+ "Label already defined\0"
+ "Label name too long\0"
+ "Invalid label name\0"
+ "Invalid parent label\0"
+ "Non-local label can't have parent\0"
+ "Invalid section\0"
+ "Too many sections\0"
+ "Invalid section name\0"
+ "Too many relocations\0"
+ "Invalid relocation entry\0"
+ "Relocation offset out of range\0"
+ "Invalid assignment\0"
+ "Invalid instruction\0"
+ "Invalid register type\0"
+ "Invalid register group\0"
+ "Invalid register physical id\0"
+ "Invalid register virtual id\0"
+ "Invalid prefix combination\0"
+ "Invalid lock prefix\0"
+ "Invalid xacquire prefix\0"
+ "Invalid xrelease prefix\0"
+ "Invalid rep prefix\0"
+ "Invalid rex prefix\0"
+ "Invalid {...} register \0"
+ "Invalid use of {k}\0"
+ "Invalid use of {k}{z}\0"
+ "Invalid broadcast {1tox}\0"
+ "Invalid {er} or {sae} option\0"
+ "Invalid address\0"
+ "Invalid address index\0"
+ "Invalid address scale\0"
+ "Invalid use of 64-bit address or offset\0"
+ "Invalid use of 64-bit address or offset that requires 32-bit zero-extension\0"
+ "Invalid displacement\0"
+ "Invalid segment\0"
+ "Invalid immediate value\0"
+ "Invalid operand size\0"
+ "Ambiguous operand size\0"
+ "Operand size mismatch\0"
+ "Invalid option\0"
+ "Option already defined\0"
+ "Invalid type-info\0"
+ "Invalid use of a low 8-bit GPB register\0"
+ "Invalid use of a 64-bit GPQ register in 32-bit mode\0"
+ "Invalid use of an 80-bit float\0"
+ "Not consecutive registers\0"
+ "No more physical registers\0"
+ "Overlapped registers\0"
+ "Overlapping register and arguments base-address register\0"
+ "Unbound label cannot be evaluated by expression\0"
+ "Arithmetic overflow during expression evaluation\0"
+ "Unknown error\0";
+ return Support::findPackedString(errorMessages, Support::min<Error>(err, kErrorCount));
+#else
+ DebugUtils::unused(err);
+ static const char noMessage[] = "";
+ return noMessage;
+#endif
+}
+
+ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
+#if defined(_WIN32)
+ ::OutputDebugStringA(str);
+#else
+ ::fputs(str, stderr);
+#endif
+}
+
+ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
+ char str[1024];
+
+ snprintf(str, 1024,
+ "[asmjit] Assertion failed at %s (line %d):\n"
+ "[asmjit] %s\n", file, line, msg);
+
+ debugOutput(str);
+ ::abort();
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/globals.h b/3rdparty/asmjit/src/asmjit/core/globals.h
new file mode 100644
index 00000000000..6373b7e8c6a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/globals.h
@@ -0,0 +1,425 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
+#define ASMJIT_CORE_GLOBALS_H_INCLUDED
+
+#include "../core/api-config.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Support]
+// ============================================================================
+
+//! \cond INTERNAL
+//! \addtogroup Support
+//! \{
+namespace Support {
+ //! Cast designed to cast between function and void* pointers.
+ template<typename Dst, typename Src>
+ static inline Dst ptr_cast_impl(Src p) noexcept { return (Dst)p; }
+} // {Support}
+
+#if defined(ASMJIT_NO_STDCXX)
+namespace Support {
+ ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
+ ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
+} // {Support}
+
+#define ASMJIT_BASE_CLASS(TYPE) \
+ ASMJIT_INLINE void* operator new(size_t n) noexcept { \
+ return Support::operatorNew(n); \
+ } \
+ \
+ ASMJIT_INLINE void operator delete(void* p) noexcept { \
+ Support::operatorDelete(p); \
+ } \
+ \
+ ASMJIT_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
+ ASMJIT_INLINE void operator delete(void*, void*) noexcept {}
+#else
+#define ASMJIT_BASE_CLASS(TYPE)
+#endif
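+
+// A sketch of how `ASMJIT_BASE_CLASS` is intended to be used (hypothetical
+// type). When `ASMJIT_NO_STDCXX` is defined the macro injects `new` and
+// `delete` operators backed by malloc/free, otherwise it expands to nothing:
+//
+// class SomeAllocatedType {
+// public:
+//   ASMJIT_BASE_CLASS(SomeAllocatedType)
+//   // ...
+// };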
+
+//! \}
+//! \endcond
+
+// ============================================================================
+// [asmjit::Globals]
+// ============================================================================
+
+//! \addtogroup asmjit_core
+//! \{
+
+//! Contains typedefs, constants, and variables used globally by AsmJit.
+namespace Globals {
+
+// ============================================================================
+// [asmjit::Globals::<global>]
+// ============================================================================
+
+//! Host memory allocator overhead.
+constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
+
+//! Host memory allocator alignment.
+constexpr uint32_t kAllocAlignment = 8;
+
+//! Aggressive growing strategy threshold.
+constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
+
+//! Maximum height of RB-Tree is:
+//!
+//! `2 * log2(n + 1)`.
+//!
+//! Size of RB node is at least two pointers (without data),
+//! so a theoretical architecture limit would be:
+//!
+//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
+//!
+//! Which yields 30 on 32-bit arch and 61 on 64-bit arch.
+//! The final value was adjusted by +1 for safety reasons.
+constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
+
+//! Maximum number of operands per a single instruction.
+constexpr uint32_t kMaxOpCount = 6;
+
+// TODO: Use this one.
+constexpr uint32_t kMaxFuncArgs = 16;
+
+//! Maximum number of physical registers AsmJit can use per register group.
+constexpr uint32_t kMaxPhysRegs = 32;
+
+//! Maximum alignment.
+constexpr uint32_t kMaxAlignment = 64;
+
+//! Maximum label or symbol size in bytes.
+constexpr uint32_t kMaxLabelNameSize = 2048;
+
+//! Maximum section name size.
+constexpr uint32_t kMaxSectionNameSize = 35;
+
+//! Maximum size of comment.
+constexpr uint32_t kMaxCommentSize = 1024;
+
+//! Invalid identifier.
+constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
+
+//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
+constexpr uint32_t kNotFound = 0xFFFFFFFFu;
+
+//! Invalid base address.
+constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
+
+// ============================================================================
+// [asmjit::Globals::ResetPolicy]
+// ============================================================================
+
+//! Reset policy used by most `reset()` functions.
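+//!
+//! For example (a sketch; `holder` stands for any AsmJit object that accepts
+//! a reset policy in its `reset()` member function):
+//!
+//! ```
+//! holder.reset(Globals::kResetSoft); // Keeps allocated memory for reuse.
+//! holder.reset(Globals::kResetHard); // Releases all memory used, if any.
+//! ```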
+enum ResetPolicy : uint32_t {
+ //! Soft reset, doesn't deallocate memory (default).
+ kResetSoft = 0,
+ //! Hard reset, releases all memory used, if any.
+ kResetHard = 1
+};
+
+// ============================================================================
+// [asmjit::Globals::Link]
+// ============================================================================
+
+enum Link : uint32_t {
+ kLinkLeft = 0,
+ kLinkRight = 1,
+
+ kLinkPrev = 0,
+ kLinkNext = 1,
+
+ kLinkFirst = 0,
+ kLinkLast = 1,
+
+ kLinkCount = 2
+};
+
+struct Init_ {};
+struct NoInit_ {};
+
+static constexpr Init_ Init {};
+static constexpr NoInit_ NoInit {};
+
+} // {Globals}
+
+// ============================================================================
+// [asmjit::Error]
+// ============================================================================
+
+//! AsmJit error type (uint32_t).
+typedef uint32_t Error;
+
+//! AsmJit error codes.
+enum ErrorCode : uint32_t {
+ //! No error (success).
+ kErrorOk = 0,
+
+ //! Out of memory.
+ kErrorOutOfMemory,
+
+ //! Invalid argument.
+ kErrorInvalidArgument,
+
+ //! Invalid state.
+ //!
+ //! If this error is returned it means that either you are doing something
+ //! wrong or AsmJit caught itself by doing something wrong. This error should
+ //! never be ignored.
+ kErrorInvalidState,
+
+ //! Invalid or incompatible architecture.
+ kErrorInvalidArch,
+
+ //! The object is not initialized.
+ kErrorNotInitialized,
+ //! The object is already initialized.
+ kErrorAlreadyInitialized,
+
+ //! Built-in feature was disabled at compile time and it's not available.
+ kErrorFeatureNotEnabled,
+
+ //! Too many handles (Windows) or file descriptors (Unix/Posix).
+ kErrorTooManyHandles,
+ //! Code generated is larger than allowed.
+ kErrorTooLarge,
+
+ //! No code generated.
+ //!
+ //! Returned by runtime if the `CodeHolder` contains no code.
+ kErrorNoCodeGenerated,
+
+ //! Invalid directive.
+ kErrorInvalidDirective,
+ //! Attempt to use uninitialized label.
+ kErrorInvalidLabel,
+ //! Label index overflow - a single `Assembler` instance can hold almost
+  //! 2^32 (4 billion) labels. If there is an attempt to create more labels,
+  //! this error is returned.
+ kErrorTooManyLabels,
+ //! Label is already bound.
+ kErrorLabelAlreadyBound,
+ //! Label is already defined (named labels).
+ kErrorLabelAlreadyDefined,
+ //! Label name is too long.
+ kErrorLabelNameTooLong,
+ //! Label must always be local if it's anonymous (without a name).
+ kErrorInvalidLabelName,
+ //! Parent id passed to `CodeHolder::newNamedLabelId()` was invalid.
+ kErrorInvalidParentLabel,
+ //! Parent id specified for a non-local (global) label.
+ kErrorNonLocalLabelCantHaveParent,
+
+ //! Invalid section.
+ kErrorInvalidSection,
+ //! Too many sections (section index overflow).
+ kErrorTooManySections,
+ //! Invalid section name (most probably too long).
+ kErrorInvalidSectionName,
+
+ //! Relocation index overflow (too many relocations).
+ kErrorTooManyRelocations,
+ //! Invalid relocation entry.
+ kErrorInvalidRelocEntry,
+ //! Reloc entry contains address that is out of range (unencodable).
+ kErrorRelocOffsetOutOfRange,
+
+ //! Invalid assignment to a register, function argument, or function return value.
+ kErrorInvalidAssignment,
+ //! Invalid instruction.
+ kErrorInvalidInstruction,
+ //! Invalid register type.
+ kErrorInvalidRegType,
+ //! Invalid register group.
+ kErrorInvalidRegGroup,
+ //! Invalid register's physical id.
+ kErrorInvalidPhysId,
+ //! Invalid register's virtual id.
+ kErrorInvalidVirtId,
+ //! Invalid prefix combination.
+ kErrorInvalidPrefixCombination,
+ //! Invalid LOCK prefix.
+ kErrorInvalidLockPrefix,
+ //! Invalid XACQUIRE prefix.
+ kErrorInvalidXAcquirePrefix,
+ //! Invalid XRELEASE prefix.
+ kErrorInvalidXReleasePrefix,
+ //! Invalid REP prefix.
+ kErrorInvalidRepPrefix,
+ //! Invalid REX prefix.
+ kErrorInvalidRexPrefix,
+ //! Invalid {...} register.
+ kErrorInvalidExtraReg,
+ //! Invalid {k} use (not supported by the instruction).
+ kErrorInvalidKMaskUse,
+ //! Invalid {k}{z} use (not supported by the instruction).
+ kErrorInvalidKZeroUse,
+ //! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox}.
+ kErrorInvalidBroadcast,
+ //! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512).
+ kErrorInvalidEROrSAE,
+ //! Invalid address used (not encodable).
+ kErrorInvalidAddress,
+ //! Invalid index register used in memory address (not encodable).
+ kErrorInvalidAddressIndex,
+ //! Invalid address scale (not encodable).
+ kErrorInvalidAddressScale,
+ //! Invalid use of 64-bit address.
+ kErrorInvalidAddress64Bit,
+  //! Invalid use of 64-bit address that requires 32-bit zero-extension (X64).
+ kErrorInvalidAddress64BitZeroExtension,
+ //! Invalid displacement (not encodable).
+ kErrorInvalidDisplacement,
+ //! Invalid segment (X86).
+ kErrorInvalidSegment,
+
+ //! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
+ kErrorInvalidImmediate,
+
+ //! Invalid operand size.
+ kErrorInvalidOperandSize,
+  //! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
+ kErrorAmbiguousOperandSize,
+ //! Mismatching operand size (size of multiple operands doesn't match the operation size).
+ kErrorOperandSizeMismatch,
+
+ //! Invalid option.
+ kErrorInvalidOption,
+ //! Option already defined.
+ kErrorOptionAlreadyDefined,
+
+ //! Invalid TypeId.
+ kErrorInvalidTypeId,
+  //! Invalid use of an 8-bit GPB-HIGH register.
+ kErrorInvalidUseOfGpbHi,
+ //! Invalid use of a 64-bit GPQ register in 32-bit mode.
+ kErrorInvalidUseOfGpq,
+ //! Invalid use of an 80-bit float (Type::kIdF80).
+ kErrorInvalidUseOfF80,
+  //! Some registers in the instruction must be consecutive (some ARM and AVX512 neural-net instructions).
+ kErrorNotConsecutiveRegs,
+
+ //! AsmJit requires a physical register, but no one is available.
+ kErrorNoMorePhysRegs,
+ //! A variable has been assigned more than once to a function argument (BaseCompiler).
+ kErrorOverlappedRegs,
+ //! Invalid register to hold stack arguments offset.
+ kErrorOverlappingStackRegWithRegArg,
+
+ //! Unbound label cannot be evaluated by expression.
+ kErrorExpressionLabelNotBound,
+ //! Arithmetic overflow during expression evaluation.
+ kErrorExpressionOverflow,
+
+ //! Count of AsmJit error codes.
+ kErrorCount
+};
+
+// ============================================================================
+// [asmjit::ByteOrder]
+// ============================================================================
+
+//! Byte order.
+namespace ByteOrder {
+ enum : uint32_t {
+ kLE = 0,
+ kBE = 1,
+ kNative = ASMJIT_ARCH_LE ? kLE : kBE,
+ kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
+ };
+}
+
+// ============================================================================
+// [asmjit::ptr_as_func / func_as_ptr]
+// ============================================================================
+
+template<typename Func>
+static inline Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
+template<typename Func>
+static inline void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
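+
+// A usage sketch: `mem` is a hypothetical `void*` that points to generated
+// code (e.g. returned by a JIT runtime):
+//
+// typedef int (*SumFunc)(int a, int b);
+// SumFunc sum = ptr_as_func<SumFunc>(mem); // Convert void* to a function pointer.
+// int result = sum(1, 2);                  // Invoke the generated code.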
+
+// ============================================================================
+// [asmjit::DebugUtils]
+// ============================================================================
+
+//! Debugging utilities.
+namespace DebugUtils {
+
+//! Used to silence warnings about unused arguments or variables.
+template<typename... Args>
+static ASMJIT_INLINE void unused(Args&&...) noexcept {}
+
+//! Returns the error `err` passed.
+//!
+//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
+//! help with tracing the origin of any error reported / returned by AsmJit.
+static constexpr Error errored(Error err) noexcept { return err; }
+
+//! Returns a printable version of `asmjit::Error` code.
+ASMJIT_API const char* errorAsString(Error err) noexcept;
+
+//! Called to output debugging message(s).
+ASMJIT_API void debugOutput(const char* str) noexcept;
+
+//! Called on assertion failure.
+//!
+//! \param file Source file name where it happened.
+//! \param line Line in the source file.
+//! \param msg Message to display.
+//!
+//! If you have problems with assertions put a breakpoint at assertionFailed()
+//! function (asmjit/core/globals.cpp) and check the call stack to locate the
+//! failing code.
+ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
+
+#if defined(ASMJIT_BUILD_DEBUG)
+#define ASMJIT_ASSERT(EXP) \
+ do { \
+ if (ASMJIT_LIKELY(EXP)) \
+ break; \
+ ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #EXP); \
+ } while (0)
+#else
+#define ASMJIT_ASSERT(EXP) ((void)0)
+#endif
+
+//! Used by AsmJit to propagate a possible `Error` produced by `...` to the caller.
+#define ASMJIT_PROPAGATE(...) \
+ do { \
+ ::asmjit::Error _err = __VA_ARGS__; \
+ if (ASMJIT_UNLIKELY(_err)) \
+ return _err; \
+ } while (0)
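+
+// A usage sketch (hypothetical helper): every call that can fail is wrapped
+// in `ASMJIT_PROPAGATE` so the first error returns early to the caller.
+//
+// Error initAndFinalize(FuncFrame& frame, const FuncDetail& detail) noexcept {
+//   ASMJIT_PROPAGATE(frame.init(detail));
+//   ASMJIT_PROPAGATE(frame.finalize());
+//   return kErrorOk;
+// }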
+
+} // {DebugUtils}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_GLOBALS_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/inst.cpp b/3rdparty/asmjit/src/asmjit/core/inst.cpp
new file mode 100644
index 00000000000..d89c29f1917
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/inst.cpp
@@ -0,0 +1,139 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_ARM)
+
+#include "../core/arch.h"
+#include "../core/inst.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86instapi_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/arminstapi_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::InstAPI - Text]
+// ============================================================================
+
+#ifndef ASMJIT_NO_TEXT
+Error InstAPI::instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::InstInternal::instIdToString(archId, instId, output);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::InstInternal::instIdToString(archId, instId, output);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+
+uint32_t InstAPI::stringToInstId(uint32_t archId, const char* s, size_t len) noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::InstInternal::stringToInstId(archId, s, len);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::InstInternal::stringToInstId(archId, s, len);
+#endif
+
+ return 0;
+}
+#endif // !ASMJIT_NO_TEXT
+
+// ============================================================================
+// [asmjit::InstAPI - Validate]
+// ============================================================================
+
+#ifndef ASMJIT_NO_VALIDATION
+Error InstAPI::validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::InstInternal::validate(archId, inst, operands, opCount);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::InstInternal::validate(archId, inst, operands, opCount);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_VALIDATION
+
+// ============================================================================
+// [asmjit::InstAPI - QueryRWInfo]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+Error InstAPI::queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept {
+  if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::InstInternal::queryRWInfo(archId, inst, operands, opCount, out);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::InstInternal::queryRWInfo(archId, inst, operands, opCount, out);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+// ============================================================================
+// [asmjit::InstAPI - QueryFeatures]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+Error InstAPI::queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept {
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::InstInternal::queryFeatures(archId, inst, operands, opCount, out);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::InstInternal::queryFeatures(archId, inst, operands, opCount, out);
+#endif
+
+ return DebugUtils::errored(kErrorInvalidArch);
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_ARM
diff --git a/3rdparty/asmjit/src/asmjit/core/inst.h b/3rdparty/asmjit/src/asmjit/core/inst.h
new file mode 100644
index 00000000000..91671822043
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/inst.h
@@ -0,0 +1,469 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_INST_H_INCLUDED
+#define ASMJIT_CORE_INST_H_INCLUDED
+
+#include "../core/cpuinfo.h"
+#include "../core/operand.h"
+#include "../core/string.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::InstInfo]
+// ============================================================================
+
+// TODO: Finalize instruction info and make more x86::InstDB methods/structs private.
+
+/*
+
+struct InstInfo {
+ //! Architecture agnostic attributes.
+ enum Attributes : uint32_t {
+
+
+ };
+
+ //! Instruction attributes.
+ uint32_t _attributes;
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ inline uint32_t attributes() const noexcept { return _attributes; }
+ inline bool hasAttribute(uint32_t attr) const noexcept { return (_attributes & attr) != 0; }
+};
+
+//! Gets attributes of the given instruction.
+ASMJIT_API Error queryCommonInfo(uint32_t archId, uint32_t instId, InstInfo& out) noexcept;
+
+*/
+
+// ============================================================================
+// [asmjit::InstRWInfo / OpRWInfo]
+// ============================================================================
+
+//! Read/Write information related to a single operand, used by `InstRWInfo`.
+struct OpRWInfo {
+ //! Read/Write flags, see `OpRWInfo::Flags`.
+ uint32_t _opFlags;
+ //! Physical register index, if required.
+ uint8_t _physId;
+ //! Size of a possible memory operand that can replace a register operand.
+ uint8_t _rmSize;
+ //! Reserved for future use.
+ uint8_t _reserved[2];
+ //! Read bit-mask where each bit represents one byte read from Reg/Mem.
+ uint64_t _readByteMask;
+ //! Write bit-mask where each bit represents one byte written to Reg/Mem.
+ uint64_t _writeByteMask;
+ //! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
+ uint64_t _extendByteMask;
+
+ //! Flags describe how the operand is accessed and some additional information.
+ enum Flags : uint32_t {
+ //! Operand is read.
+ //!
+ //! \note This flag must be `0x00000001`.
+ kRead = 0x00000001u,
+
+ //! Operand is written.
+ //!
+ //! \note This flag must be `0x00000002`.
+ kWrite = 0x00000002u,
+
+ //! Operand is both read and written.
+ //!
+ //! \note This combination of flags must be `0x00000003`.
+ kRW = 0x00000003u,
+
+ //! Register operand can be replaced by a memory operand.
+ kRegMem = 0x00000004u,
+
+ //! The `extendByteMask()` represents a zero extension.
+ kZExt = 0x00000010u,
+
+ //! Register operand must use `physId()`.
+ kRegPhysId = 0x00000100u,
+ //! Base register of a memory operand must use `physId()`.
+ kMemPhysId = 0x00000200u,
+
+ //! This memory operand is only used to encode registers and doesn't access memory.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+ //! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
+    kMemFake = 0x00000400u,
+
+ //! Base register of the memory operand will be read.
+ kMemBaseRead = 0x00001000u,
+ //! Base register of the memory operand will be written.
+ kMemBaseWrite = 0x00002000u,
+ //! Base register of the memory operand will be read & written.
+ kMemBaseRW = 0x00003000u,
+
+ //! Index register of the memory operand will be read.
+ kMemIndexRead = 0x00004000u,
+ //! Index register of the memory operand will be written.
+ kMemIndexWrite = 0x00008000u,
+ //! Index register of the memory operand will be read & written.
+ kMemIndexRW = 0x0000C000u,
+
+ //! Base register of the memory operand will be modified before the operation.
+ kMemBasePreModify = 0x00010000u,
+ //! Base register of the memory operand will be modified after the operation.
+ kMemBasePostModify = 0x00020000u
+ };
+
+ static_assert(kRead == 0x1, "OpRWInfo::kRead flag must be 0x1");
+ static_assert(kWrite == 0x2, "OpRWInfo::kWrite flag must be 0x2");
+ static_assert(kRegMem == 0x4, "OpRWInfo::kRegMem flag must be 0x4");
+
+ //! \name Reset
+ //! \{
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+ inline void reset(uint32_t opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
+ _opFlags = opFlags;
+ _physId = uint8_t(physId);
+ _rmSize = uint8_t((opFlags & kRegMem) ? regSize : uint32_t(0));
+ _resetReserved();
+
+ uint64_t mask = Support::lsbMask<uint64_t>(regSize);
+ _readByteMask = opFlags & kRead ? mask : uint64_t(0);
+ _writeByteMask = opFlags & kWrite ? mask : uint64_t(0);
+ _extendByteMask = 0;
+ }
+
+ inline void _resetReserved() noexcept {
+ memset(_reserved, 0, sizeof(_reserved));
+ }
+
+ //! \}
+
+ //! \name Operand Flags
+ //! \{
+
+ inline uint32_t opFlags() const noexcept { return _opFlags; }
+ inline bool hasOpFlag(uint32_t flag) const noexcept { return (_opFlags & flag) != 0; }
+
+ inline void addOpFlags(uint32_t flags) noexcept { _opFlags |= flags; }
+ inline void clearOpFlags(uint32_t flags) noexcept { _opFlags &= ~flags; }
+
+ inline bool isRead() const noexcept { return hasOpFlag(kRead); }
+ inline bool isWrite() const noexcept { return hasOpFlag(kWrite); }
+ inline bool isReadWrite() const noexcept { return (_opFlags & kRW) == kRW; }
+ inline bool isReadOnly() const noexcept { return (_opFlags & kRW) == kRead; }
+ inline bool isWriteOnly() const noexcept { return (_opFlags & kRW) == kWrite; }
+ inline bool isRm() const noexcept { return hasOpFlag(kRegMem); }
+ inline bool isZExt() const noexcept { return hasOpFlag(kZExt); }
+
+ //! \}
+
+ //! \name Physical Register ID
+ //! \{
+
+ inline uint32_t physId() const noexcept { return _physId; }
+ inline bool hasPhysId() const noexcept { return _physId != BaseReg::kIdBad; }
+ inline void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
+
+ //! \}
+
+ //! \name Reg/Mem
+ //! \{
+
+ inline uint32_t rmSize() const noexcept { return _rmSize; }
+ inline void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
+
+ //! \}
+
+ //! \name Read & Write Masks
+ //! \{
+
+ inline uint64_t readByteMask() const noexcept { return _readByteMask; }
+ inline uint64_t writeByteMask() const noexcept { return _writeByteMask; }
+ inline uint64_t extendByteMask() const noexcept { return _extendByteMask; }
+
+ inline void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
+ inline void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
+ inline void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
+
+ //! \}
+};
+
+//! Read/Write information of an instruction.
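+//!
+//! A query sketch (`InstAPI::queryRWInfo()` is declared later in this header;
+//! `archId`, `inst`, `operands`, and `opCount` describe the instruction to
+//! analyze):
+//!
+//! ```
+//! InstRWInfo rw;
+//! if (InstAPI::queryRWInfo(archId, inst, operands, opCount, rw) == kErrorOk) {
+//!   uint64_t written = rw.operand(0).writeByteMask(); // Bytes written by operand 0.
+//! }
+//! ```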
+struct InstRWInfo {
+ //! Instruction flags.
+ uint32_t _instFlags;
+ //! Mask of flags read.
+ uint32_t _readFlags;
+ //! Mask of flags written.
+ uint32_t _writeFlags;
+ //! Count of operands.
+ uint8_t _opCount;
+ //! CPU feature required for replacing register operand with memory operand.
+ uint8_t _rmFeature;
+ //! Reserved for future use.
+ uint8_t _reserved[19];
+  //! Read/Write info of the extra register (rep{} or kz{}).
+ OpRWInfo _extraReg;
+ //! Read/Write info of instruction operands.
+ OpRWInfo _operands[Globals::kMaxOpCount];
+
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ inline uint32_t instFlags() const noexcept { return _instFlags; }
+ inline bool hasInstFlag(uint32_t flag) const noexcept { return (_instFlags & flag) != 0; }
+
+ inline uint32_t opCount() const noexcept { return _opCount; }
+
+ inline uint32_t readFlags() const noexcept { return _readFlags; }
+ inline uint32_t writeFlags() const noexcept { return _writeFlags; }
+
+  //! Returns the CPU feature required to replace a register operand with a
+  //! memory operand. If the returned feature is zero (none) then this
+  //! instruction either doesn't provide a register/memory combination or no
+  //! extra CPU feature is required to use it.
+  //!
+  //! X86 Specific
+  //! ------------
+  //!
+  //! Some AVX+ instructions may require extra features for replacing registers
+  //! with memory operands, for example the VPSLLDQ instruction only supports a
+  //! 'reg/reg/imm' combination on AVX/AVX2-capable CPUs and requires AVX-512
+  //! for the 'reg/mem/imm' combination.
+ inline uint32_t rmFeature() const noexcept { return _rmFeature; }
+
+ inline const OpRWInfo& extraReg() const noexcept { return _extraReg; }
+ inline const OpRWInfo* operands() const noexcept { return _operands; }
+
+ inline const OpRWInfo& operand(size_t index) const noexcept {
+ ASMJIT_ASSERT(index < Globals::kMaxOpCount);
+ return _operands[index];
+ }
+};
+
+// ============================================================================
+// [asmjit::BaseInst]
+// ============================================================================
+
+//! Instruction id, options, and extraReg packed in a single structure. This
+//! structure exists mainly to simplify the analysis and validation APIs that
+//! require both a `BaseInst` and an `Operand[]` array.
+class BaseInst {
+public:
+ //! Instruction id.
+ uint32_t _id;
+ //! Instruction options.
+ uint32_t _options;
+ //! Extra register used by instruction (either REP register or AVX-512 selector).
+ RegOnly _extraReg;
+
+ enum Id : uint32_t {
+ //! Invalid or uninitialized instruction id.
+ kIdNone = 0x00000000u,
+ //! Abstract instruction (BaseBuilder and BaseCompiler).
+ kIdAbstract = 0x80000000u
+ };
+
+ enum Options : uint32_t {
+ //! Used internally by emitters for handling errors and rare cases.
+ kOptionReserved = 0x00000001u,
+
+ //! Used only by Assembler to mark that `_op4` and `_op5` are used (internal).
+ //!
+ //! TODO: This should be removed in the future.
+ kOptionOp4Op5Used = 0x00000002u,
+
+ //! Prevents following a jump during compilation (BaseCompiler).
+ //!
+ //! TODO: This should be renamed to kOptionNoReturn.
+ kOptionUnfollow = 0x00000010u,
+
+ //! Overwrite the destination operand(s) (BaseCompiler).
+ //!
+    //! A hint that is important for register liveness analysis. It tells the
+    //! compiler that the destination operand will be overwritten now or by
+    //! adjacent instructions. BaseCompiler knows when a register is completely
+    //! overwritten by a single instruction (for example "movaps" or
+    //! "pxor x, x"), so such instructions don't need to be marked; however,
+    //! if a pair of instructions is used and the first of them doesn't
+    //! completely overwrite the content of the destination, BaseCompiler
+    //! cannot mark that register as dead.
+ //!
+ //! X86 Specific
+ //! ------------
+ //!
+    //! - Instructions that always overwrite at least the size of the register
+    //!   the virtual register uses, for example "mov", "movq", and "movaps",
+    //!   don't need the overwrite option to be used - this includes conversion,
+    //!   shuffle, and other miscellaneous instructions.
+ //!
+    //! - All instructions that clear the destination register if all operands
+    //!   are the same, for example "xor x, x", "pcmpeqb x, x", etc...
+ //!
+    //! - Consecutive instructions that partially overwrite the variable until
+    //!   there is no old content left require `BaseCompiler::overwrite()` to
+    //!   be used. Some examples (not always the best use cases though):
+ //!
+ //! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
+ //! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
+ //! - `mov al, ?` followed by `and ax, 0xFF`
+ //! - `mov al, ?` followed by `mov ah, al`
+ //! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
+ //!
+    //! - If an allocated variable is used temporarily for scalar operations.
+    //!   For example, if you allocate a full vector like `x86::Compiler::newXmm()`
+    //!   and then use that vector for scalar operations, you should use the
+    //!   `overwrite()` directive:
+ //!
+    //!   - `sqrtss x, y` - only the LO element of `x` is changed; if you don't
+    //!     use the HI elements, use `compiler.overwrite().sqrtss(x, y)`.
+ kOptionOverwrite = 0x00000020u,
+
+ //! Emit short-form of the instruction.
+ kOptionShortForm = 0x00000040u,
+ //! Emit long-form of the instruction.
+ kOptionLongForm = 0x00000080u,
+
+ //! Conditional jump is likely to be taken.
+ kOptionTaken = 0x00000100u,
+ //! Conditional jump is unlikely to be taken.
+ kOptionNotTaken = 0x00000200u
+ };
+
+ //! Control type.
+ enum ControlType : uint32_t {
+ //! No control type (doesn't jump).
+ kControlNone = 0u,
+ //! Unconditional jump.
+ kControlJump = 1u,
+ //! Conditional jump (branch).
+ kControlBranch = 2u,
+ //! Function call.
+ kControlCall = 3u,
+ //! Function return.
+ kControlReturn = 4u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline explicit BaseInst(uint32_t id = 0, uint32_t options = 0) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg() {}
+
+ inline BaseInst(uint32_t id, uint32_t options, const RegOnly& extraReg) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg(extraReg) {}
+
+ inline BaseInst(uint32_t id, uint32_t options, const BaseReg& extraReg) noexcept
+ : _id(id),
+ _options(options),
+ _extraReg { extraReg.signature(), extraReg.id() } {}
+
+ //! \}
+
+ //! \name Instruction ID
+ //! \{
+
+ inline uint32_t id() const noexcept { return _id; }
+ inline void setId(uint32_t id) noexcept { _id = id; }
+ inline void resetId() noexcept { _id = 0; }
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+ inline uint32_t options() const noexcept { return _options; }
+ inline void setOptions(uint32_t options) noexcept { _options = options; }
+ inline void addOptions(uint32_t options) noexcept { _options |= options; }
+ inline void clearOptions(uint32_t options) noexcept { _options &= ~options; }
+ inline void resetOptions() noexcept { _options = 0; }
+
+ //! \}
+
+ //! \name Extra Register
+ //! \{
+
+ inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
+ inline RegOnly& extraReg() noexcept { return _extraReg; }
+ inline const RegOnly& extraReg() const noexcept { return _extraReg; }
+ inline void setExtraReg(const BaseReg& reg) noexcept { _extraReg.init(reg); }
+ inline void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
+ inline void resetExtraReg() noexcept { _extraReg.reset(); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::InstAPI]
+// ============================================================================
+
+//! Instruction API.
+namespace InstAPI {
+
+#ifndef ASMJIT_NO_TEXT
+//! Appends the name of the instruction specified by `instId` into the
+//! `output` string.
+//!
+//! \note Only the raw instruction name (without any additional text such as
+//! prefixes or suffixes) will be appended.
+ASMJIT_API Error instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept;
+
+//! Parses an instruction name in the given string `s`. Length is specified
+//! by the `len` argument, which can be `SIZE_MAX` if `s` is known to be null
+//! terminated.
+//!
+//! Returns the parsed instruction id, or `BaseInst::kIdNone` if the name
+//! doesn't match any instruction of the given architecture.
+ASMJIT_API uint32_t stringToInstId(uint32_t archId, const char* s, size_t len) noexcept;
+#endif // !ASMJIT_NO_TEXT
+
+#ifndef ASMJIT_NO_VALIDATION
+//! Validates the given instruction.
+ASMJIT_API Error validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept;
+#endif // !ASMJIT_NO_VALIDATION
+
+#ifndef ASMJIT_NO_INTROSPECTION
+//! Gets Read/Write information of the given instruction.
+ASMJIT_API Error queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept;
+
+//! Gets CPU features required by the given instruction.
+ASMJIT_API Error queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept;
+#endif // !ASMJIT_NO_INTROSPECTION
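+
+// Usage sketch (illustrative only): querying the Read/Write information of an
+// x86 'add' instruction. The x86 identifiers (`x86::Inst::kIdAdd`, `x86::eax`,
+// `x86::ebx`) live in the x86 backend and are assumed here for illustration:
+//
+//   InstRWInfo rw;
+//   BaseInst inst(x86::Inst::kIdAdd);
+//   Operand ops[] = { x86::eax, x86::ebx };
+//
+//   if (InstAPI::queryRWInfo(ArchInfo::kIdX86, inst, ops, 2, rw) == kErrorOk) {
+//     bool dstRW = rw.operand(0).isReadWrite(); // 'add' reads & writes dst.
+//   }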
+
+} // {InstAPI}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_INST_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/jitallocator.cpp b/3rdparty/asmjit/src/asmjit/core/jitallocator.cpp
new file mode 100644
index 00000000000..a8ca0c3d75a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/jitallocator.cpp
@@ -0,0 +1,1152 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/arch.h"
+#include "../core/jitallocator.h"
+#include "../core/osutils.h"
+#include "../core/support.h"
+#include "../core/virtmem.h"
+#include "../core/zone.h"
+#include "../core/zonelist.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::JitAllocator - Constants]
+// ============================================================================
+
+enum JitAllocatorConstants : uint32_t {
+ //! Number of pools to use when `JitAllocator::kOptionUseMultiplePools` is set.
+ //!
+ //! Each pool increases granularity twice to make memory management more
+ //! efficient. Ideal number of pools appears to be 3 to 4 as it distributes
+ //! small and large functions properly.
+ kJitAllocatorMultiPoolCount = 3,
+
+ //! Minimum granularity (and the default granularity for pool #0).
+ kJitAllocatorBaseGranularity = 64,
+
+ //! Maximum block size (16MB).
+ kJitAllocatorMaxBlockSize = 1024 * 1024 * 16
+};
+
+static inline uint32_t JitAllocator_defaultFillPattern() noexcept {
+ // X86 and X86_64 - 4x 'int3' instruction.
+ if (ASMJIT_ARCH_X86)
+ return 0xCCCCCCCCu;
+
+ // Unknown...
+ return 0u;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - BitFlipIterator]
+// ============================================================================
+
+//! BitWord[] iterator used by `JitAllocator` that can flip the search pattern
+//! during iteration.
+template<typename T>
+class BitFlipIterator {
+public:
+ ASMJIT_INLINE BitFlipIterator(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept {
+ init(data, numBitWords, start, xorMask);
+ }
+
+ ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0, T xorMask = 0) noexcept {
+ const T* ptr = data + (start / Support::bitSizeOf<T>());
+ size_t idx = Support::alignDown(start, Support::bitSizeOf<T>());
+ size_t end = numBitWords * Support::bitSizeOf<T>();
+
+ T bitWord = T(0);
+ if (idx < end) {
+ bitWord = (*ptr++ ^ xorMask) & (Support::allOnes<T>() << (start % Support::bitSizeOf<T>()));
+ while (!bitWord && (idx += Support::bitSizeOf<T>()) < end)
+ bitWord = *ptr++ ^ xorMask;
+ }
+
+ _ptr = ptr;
+ _idx = idx;
+ _end = end;
+ _current = bitWord;
+ _xorMask = xorMask;
+ }
+
+ ASMJIT_INLINE bool hasNext() const noexcept {
+ return _current != T(0);
+ }
+
+ ASMJIT_INLINE size_t next() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = Support::ctz(bitWord);
+ bitWord ^= T(1u) << bit;
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += Support::bitSizeOf<T>()) < _end)
+ bitWord = *_ptr++ ^ _xorMask;
+
+ _current = bitWord;
+ return n;
+ }
+
+ ASMJIT_INLINE size_t nextAndFlip() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = Support::ctz(bitWord);
+ bitWord ^= Support::allOnes<T>() << bit;
+ _xorMask ^= Support::allOnes<T>();
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += Support::bitSizeOf<T>()) < _end)
+ bitWord = *_ptr++ ^ _xorMask;
+
+ _current = bitWord;
+ return n;
+ }
+
+ ASMJIT_INLINE size_t peekNext() const noexcept {
+ ASMJIT_ASSERT(_current != T(0));
+ return _idx + Support::ctz(_current);
+ }
+
+ const T* _ptr;
+ size_t _idx;
+ size_t _end;
+ T _current;
+ T _xorMask;
+};
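+
+// Usage sketch: set-bit ranges can be decoded as [start, end) pairs, because
+// `nextAndFlip()` returns the first set bit and then flips the polarity, so
+// the following call returns the first clear bit:
+//
+//   static const uint32_t bits[] = { 0x000000F0u }; // Bits 4..7 are set.
+//   BitFlipIterator<uint32_t> it(bits, 1);
+//
+//   while (it.hasNext()) {
+//     size_t start = it.nextAndFlip();                   // 4
+//     size_t end = it.hasNext() ? it.nextAndFlip() : 32; // 8
+//     // Bits [start, end) are set.
+//   }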
+
+// ============================================================================
+// [asmjit::JitAllocator - Pool]
+// ============================================================================
+
+class JitAllocatorBlock;
+
+class JitAllocatorPool {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocatorPool)
+
+ inline JitAllocatorPool(uint32_t granularity) noexcept
+ : blocks(),
+ cursor(nullptr),
+ blockCount(0),
+ granularity(uint16_t(granularity)),
+ granularityLog2(uint8_t(Support::ctz(granularity))),
+ emptyBlockCount(0),
+ totalAreaSize(0),
+ totalAreaUsed(0),
+ totalOverheadBytes(0) {}
+
+ inline void reset() noexcept {
+ blocks.reset();
+ cursor = nullptr;
+ blockCount = 0;
+ totalAreaSize = 0;
+ totalAreaUsed = 0;
+ totalOverheadBytes = 0;
+ }
+
+ inline size_t byteSizeFromAreaSize(uint32_t areaSize) const noexcept { return size_t(areaSize) * granularity; }
+ inline uint32_t areaSizeFromByteSize(size_t size) const noexcept { return uint32_t((size + granularity - 1) >> granularityLog2); }
+
+ inline size_t bitWordCountFromAreaSize(uint32_t areaSize) const noexcept {
+ using namespace Support;
+ return alignUp<size_t>(areaSize, kBitWordSizeInBits) / kBitWordSizeInBits;
+ }
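+
+  // Worked example with the default granularity of 64 (granularityLog2 == 6):
+  //   areaSizeFromByteSize(1)     == 1   (1 byte occupies one 64-byte unit)
+  //   areaSizeFromByteSize(130)   == 3   (130 bytes round up to three units)
+  //   byteSizeFromAreaSize(3)     == 192
+  //   bitWordCountFromAreaSize(3) == 1   (3 bits fit into a single BitWord)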
+
+ //! Double linked list of blocks.
+ ZoneList<JitAllocatorBlock> blocks;
+ //! Where to start looking first.
+ JitAllocatorBlock* cursor;
+
+ //! Count of blocks.
+ uint32_t blockCount;
+ //! Allocation granularity.
+ uint16_t granularity;
+ //! Log2(granularity).
+ uint8_t granularityLog2;
+ //! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty).
+ uint8_t emptyBlockCount;
+
+ //! Number of bits reserved across all blocks.
+ size_t totalAreaSize;
+ //! Number of bits used across all blocks.
+ size_t totalAreaUsed;
+ //! Overhead of all blocks (in bytes).
+ size_t totalOverheadBytes;
+};
+
+// ============================================================================
+// [asmjit::JitAllocator - Block]
+// ============================================================================
+
+class JitAllocatorBlock : public ZoneTreeNodeT<JitAllocatorBlock>,
+ public ZoneListNode<JitAllocatorBlock> {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocatorBlock)
+
+ enum Flags : uint32_t {
+ //! Block is empty.
+ kFlagEmpty = 0x00000001u,
+ //! Block is dirty (largestUnusedArea, searchStart, searchEnd).
+ kFlagDirty = 0x00000002u,
+ //! Block is dual-mapped.
+ kFlagDualMapped = 0x00000004u
+ };
+
+ inline JitAllocatorBlock(
+ JitAllocatorPool* pool,
+ VirtMem::DualMapping mapping,
+ size_t blockSize,
+ uint32_t blockFlags,
+ Support::BitWord* usedBitVector,
+ Support::BitWord* stopBitVector,
+ uint32_t areaSize) noexcept
+ : ZoneTreeNodeT(),
+ pool(pool),
+ mapping(mapping),
+ blockSize(blockSize),
+ flags(blockFlags),
+ areaSize(areaSize),
+ areaUsed(0),
+ largestUnusedArea(areaSize),
+ searchStart(0),
+ searchEnd(areaSize),
+ usedBitVector(usedBitVector),
+ stopBitVector(stopBitVector) {}
+
+ inline uint8_t* roPtr() const noexcept { return static_cast<uint8_t*>(mapping.ro); }
+ inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(mapping.rw); }
+
+ inline bool hasFlag(uint32_t f) const noexcept { return (flags & f) != 0; }
+ inline void addFlags(uint32_t f) noexcept { flags |= f; }
+ inline void clearFlags(uint32_t f) noexcept { flags &= ~f; }
+
+ inline uint32_t areaAvailable() const noexcept { return areaSize - areaUsed; }
+
+ inline void increaseUsedArea(uint32_t value) noexcept {
+ areaUsed += value;
+ pool->totalAreaUsed += value;
+ }
+
+ inline void decreaseUsedArea(uint32_t value) noexcept {
+ areaUsed -= value;
+ pool->totalAreaUsed -= value;
+ }
+
+ // RBTree default CMP uses '<' and '>' operators.
+ inline bool operator<(const JitAllocatorBlock& other) const noexcept { return roPtr() < other.roPtr(); }
+ inline bool operator>(const JitAllocatorBlock& other) const noexcept { return roPtr() > other.roPtr(); }
+
+ // Special implementation for querying blocks by `key`, which must be in `[BlockPtr, BlockPtr + BlockSize)` range.
+ inline bool operator<(const uint8_t* key) const noexcept { return roPtr() + blockSize <= key; }
+ inline bool operator>(const uint8_t* key) const noexcept { return roPtr() > key; }
+
+ //! Link to the pool that owns this block.
+ JitAllocatorPool* pool;
+ //! Virtual memory mapping - either single mapping (both pointers equal) or
+ //! dual mapping, where one pointer is Read+Execute and the second Read+Write.
+ VirtMem::DualMapping mapping;
+ //! Virtual memory size (block size) [bytes].
+ size_t blockSize;
+
+ //! Block flags.
+ uint32_t flags;
+ //! Size of the whole block area (bit-vector size).
+ uint32_t areaSize;
+ //! Used area (number of bits in bit-vector used).
+ uint32_t areaUsed;
+ //! The largest unused continuous area in the bit-vector (or `areaSize` to initiate rescan).
+ uint32_t largestUnusedArea;
+ //! Start of a search range (for unused bits).
+ uint32_t searchStart;
+ //! End of a search range (for unused bits).
+ uint32_t searchEnd;
+
+ //! Used bit-vector (0 = unused, 1 = used).
+ Support::BitWord* usedBitVector;
+ //! Stop bit-vector (0 = don't care, 1 = stop).
+ Support::BitWord* stopBitVector;
+};
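+
+// Worked example of the used/stop bit-vector encoding: two consecutive
+// allocations of 3 and 2 granularity units starting at area index 0 yield:
+//
+//   usedBitVector: 1 1 1 1 1 0 ...  (every occupied unit)
+//   stopBitVector: 0 0 1 0 1 0 ...  (the last unit of each allocation)
+//
+// release() only receives a pointer, so it recovers the allocation size by
+// scanning `stopBitVector` for the first set bit at or after the start index.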
+
+// ============================================================================
+// [asmjit::JitAllocator - PrivateImpl]
+// ============================================================================
+
+class JitAllocatorPrivateImpl : public JitAllocator::Impl {
+public:
+ inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept
+ : JitAllocator::Impl {},
+ pools(pools),
+ poolCount(poolCount) {}
+ inline ~JitAllocatorPrivateImpl() noexcept {}
+
+ //! Lock for thread safety.
+ mutable Lock lock;
+ //! System page size (also a minimum block size).
+ uint32_t pageSize;
+
+ //! Blocks from all pools in RBTree.
+ ZoneTree<JitAllocatorBlock> tree;
+ //! Allocator pools.
+ JitAllocatorPool* pools;
+ //! Number of allocator pools.
+ size_t poolCount;
+};
+
+static const JitAllocator::Impl JitAllocatorImpl_none {};
+static const JitAllocator::CreateParams JitAllocatorParams_none {};
+
+// ============================================================================
+// [asmjit::JitAllocator - Utilities]
+// ============================================================================
+
+static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::CreateParams* params) noexcept {
+ VirtMem::Info vmInfo = VirtMem::info();
+
+ if (!params)
+ params = &JitAllocatorParams_none;
+
+ uint32_t options = params->options;
+ uint32_t blockSize = params->blockSize;
+ uint32_t granularity = params->granularity;
+ uint32_t fillPattern = params->fillPattern;
+
+ // Setup pool count to [1..3].
+ size_t poolCount = 1;
+ if (options & JitAllocator::kOptionUseMultiplePools)
+    poolCount = kJitAllocatorMultiPoolCount;
+
+ // Setup block size [64kB..256MB].
+ if (blockSize < 64 * 1024 || blockSize > 256 * 1024 * 1024 || !Support::isPowerOf2(blockSize))
+ blockSize = vmInfo.pageGranularity;
+
+ // Setup granularity [64..256].
+ if (granularity < 64 || granularity > 256 || !Support::isPowerOf2(granularity))
+ granularity = kJitAllocatorBaseGranularity;
+
+ // Setup fill-pattern.
+ if (!(options & JitAllocator::kOptionCustomFillPattern))
+ fillPattern = JitAllocator_defaultFillPattern();
+
+ size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
+ void* p = ::malloc(size);
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+
+ JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
+ JitAllocatorPrivateImpl* impl = new(p) JitAllocatorPrivateImpl(pools, poolCount);
+
+ impl->options = options;
+ impl->blockSize = blockSize;
+ impl->granularity = granularity;
+ impl->fillPattern = fillPattern;
+ impl->pageSize = vmInfo.pageSize;
+
+ for (size_t poolId = 0; poolId < poolCount; poolId++)
+ new(&pools[poolId]) JitAllocatorPool(granularity << poolId);
+
+ return impl;
+}
+
+static inline void JitAllocatorImpl_destroy(JitAllocatorPrivateImpl* impl) noexcept {
+ impl->~JitAllocatorPrivateImpl();
+ ::free(impl);
+}
+
+static inline size_t JitAllocatorImpl_sizeToPoolId(const JitAllocatorPrivateImpl* impl, size_t size) noexcept {
+ size_t poolId = impl->poolCount - 1;
+ size_t granularity = size_t(impl->granularity) << poolId;
+
+ while (poolId) {
+ if (Support::alignUp(size, granularity) == size)
+ break;
+ poolId--;
+ granularity >>= 1;
+ }
+
+ return poolId;
+}
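+
+// Worked example with 3 pools and base granularity 64 (pool granularities
+// are therefore 64, 128, and 256 bytes):
+//   size 256 -> aligned to 256 -> poolId 2
+//   size 128 -> not aligned to 256, aligned to 128 -> poolId 1
+//   size  96 -> aligned to neither 256 nor 128 -> poolId 0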
+
+static inline size_t JitAllocatorImpl_bitVectorSizeToByteSize(uint32_t areaSize) noexcept {
+ using Support::kBitWordSizeInBits;
+ return ((areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits) * sizeof(Support::BitWord);
+}
+
+static inline size_t JitAllocatorImpl_calculateIdealBlockSize(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t allocationSize) noexcept {
+ JitAllocatorBlock* last = pool->blocks.last();
+ size_t blockSize = last ? last->blockSize : size_t(impl->blockSize);
+
+ if (blockSize < kJitAllocatorMaxBlockSize)
+ blockSize *= 2u;
+
+ if (allocationSize > blockSize) {
+ blockSize = Support::alignUp(allocationSize, impl->blockSize);
+ if (ASMJIT_UNLIKELY(blockSize < allocationSize))
+      return 0; // Overflowed.
+ }
+
+ return blockSize;
+}
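+
+// Worked example: with the default 64kB base block size and an empty pool the
+// first block is 128kB, and each next block doubles the size of the previous
+// one (256kB, 512kB, ...) until the 16MB cap is reached. An allocation larger
+// than the computed size falls back to `allocationSize` aligned up to the
+// base block size.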
+
+ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t pattern, size_t sizeInBytes) noexcept {
+ size_t n = sizeInBytes / 4u;
+ uint32_t* p = static_cast<uint32_t*>(mem);
+
+ for (size_t i = 0; i < n; i++)
+ p[i] = pattern;
+}
+
+// Allocate a new `JitAllocatorBlock` for the given `blockSize`.
+//
+// NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block
+// is only allocated when it's actually needed, so it would be cleared anyway.
+static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept {
+ using Support::BitWord;
+ using Support::kBitWordSizeInBits;
+
+ uint32_t areaSize = uint32_t((blockSize + pool->granularity - 1) >> pool->granularityLog2);
+ uint32_t numBitWords = (areaSize + kBitWordSizeInBits - 1u) / kBitWordSizeInBits;
+
+ JitAllocatorBlock* block = static_cast<JitAllocatorBlock*>(::malloc(sizeof(JitAllocatorBlock)));
+ BitWord* bitWords = nullptr;
+ VirtMem::DualMapping virtMem {};
+ Error err = kErrorOutOfMemory;
+
+ if (block != nullptr)
+ bitWords = static_cast<BitWord*>(::malloc(size_t(numBitWords) * 2 * sizeof(BitWord)));
+
+ uint32_t blockFlags = 0;
+ if (bitWords != nullptr) {
+ if (impl->options & JitAllocator::kOptionUseDualMapping) {
+ err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute);
+ blockFlags |= JitAllocatorBlock::kFlagDualMapped;
+ }
+ else {
+ err = VirtMem::alloc(&virtMem.ro, blockSize, VirtMem::kAccessReadWrite | VirtMem::kAccessExecute);
+ virtMem.rw = virtMem.ro;
+ }
+ }
+
+ // Out of memory.
+ if (ASMJIT_UNLIKELY(!block || !bitWords || err != kErrorOk)) {
+ if (bitWords) ::free(bitWords);
+ if (block) ::free(block);
+ return nullptr;
+ }
+
+ // Fill the memory if the secure mode is enabled.
+ if (impl->options & JitAllocator::kOptionFillUnusedMemory)
+ JitAllocatorImpl_fillPattern(virtMem.rw, impl->fillPattern, blockSize);
+
+ memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
+ return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+}
+
+static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ DebugUtils::unused(impl);
+
+ if (block->flags & JitAllocatorBlock::kFlagDualMapped)
+ VirtMem::releaseDualMapping(&block->mapping, block->blockSize);
+ else
+ VirtMem::release(block->mapping.ro, block->blockSize);
+
+ ::free(block->usedBitVector);
+ ::free(block);
+}
+
+static void JitAllocatorImpl_insertBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ JitAllocatorPool* pool = block->pool;
+
+ if (!pool->cursor)
+ pool->cursor = block;
+
+ // Add to RBTree and List.
+ impl->tree.insert(block);
+ pool->blocks.append(block);
+
+ // Update statistics.
+ pool->blockCount++;
+ pool->totalAreaSize += block->areaSize;
+ pool->totalOverheadBytes += sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u;
+}
+
+static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ JitAllocatorPool* pool = block->pool;
+
+ // Remove from RBTree and List.
+ if (pool->cursor == block)
+ pool->cursor = block->hasPrev() ? block->prev() : block->next();
+
+ impl->tree.remove(block);
+ pool->blocks.unlink(block);
+
+ // Update statistics.
+ pool->blockCount--;
+ pool->totalAreaSize -= block->areaSize;
+ pool->totalOverheadBytes -= sizeof(JitAllocatorBlock) + JitAllocatorImpl_bitVectorSizeToByteSize(block->areaSize) * 2u;
+}
+
+static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
+ JitAllocatorPool* pool = block->pool;
+
+ if (block->hasFlag(JitAllocatorBlock::kFlagEmpty))
+ return;
+
+ uint32_t areaSize = block->areaSize;
+ uint32_t granularity = pool->granularity;
+ size_t numBitWords = pool->bitWordCountFromAreaSize(areaSize);
+
+ if (impl->options & JitAllocator::kOptionFillUnusedMemory) {
+ BitFlipIterator<Support::BitWord> it(block->usedBitVector, numBitWords);
+
+ while (it.hasNext()) {
+ uint32_t start = uint32_t(it.nextAndFlip());
+ uint32_t end = areaSize;
+
+ if (it.hasNext())
+ end = uint32_t(it.nextAndFlip());
+
+ JitAllocatorImpl_fillPattern(block->rwPtr() + start * granularity, impl->fillPattern, (end - start) * granularity);
+ }
+ }
+
+ memset(block->usedBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
+ memset(block->stopBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
+
+ block->areaUsed = 0;
+ block->largestUnusedArea = areaSize;
+ block->searchStart = 0;
+ block->searchEnd = areaSize;
+ block->addFlags(JitAllocatorBlock::kFlagEmpty);
+ block->clearFlags(JitAllocatorBlock::kFlagDirty);
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Construction / Destruction]
+// ============================================================================
+
+JitAllocator::JitAllocator(const CreateParams* params) noexcept {
+ _impl = JitAllocatorImpl_new(params);
+ if (ASMJIT_UNLIKELY(!_impl))
+ _impl = const_cast<JitAllocator::Impl*>(&JitAllocatorImpl_none);
+}
+
+JitAllocator::~JitAllocator() noexcept {
+ if (_impl == &JitAllocatorImpl_none)
+ return;
+
+ reset(Globals::kResetHard);
+ JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Reset]
+// ============================================================================
+
+void JitAllocator::reset(uint32_t resetPolicy) noexcept {
+ if (_impl == &JitAllocatorImpl_none)
+ return;
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ impl->tree.reset();
+ size_t poolCount = impl->poolCount;
+
+ for (size_t poolId = 0; poolId < poolCount; poolId++) {
+ JitAllocatorPool& pool = impl->pools[poolId];
+ JitAllocatorBlock* block = pool.blocks.first();
+
+ JitAllocatorBlock* blockToKeep = nullptr;
+    // Guard against an empty pool - a fresh allocator has no blocks yet.
+    if (block && resetPolicy != Globals::kResetHard && !(impl->options & kOptionImmediateRelease)) {
+      blockToKeep = block;
+      block = block->next();
+    }
+
+ while (block) {
+ JitAllocatorBlock* next = block->next();
+ JitAllocatorImpl_deleteBlock(impl, block);
+ block = next;
+ }
+
+ pool.reset();
+
+ if (blockToKeep) {
+ blockToKeep->_listNodes[0] = nullptr;
+ blockToKeep->_listNodes[1] = nullptr;
+ JitAllocatorImpl_wipeOutBlock(impl, blockToKeep);
+ JitAllocatorImpl_insertBlock(impl, blockToKeep);
+ pool.emptyBlockCount = 1;
+ }
+ }
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Statistics]
+// ============================================================================
+
+JitAllocator::Statistics JitAllocator::statistics() const noexcept {
+ Statistics statistics;
+ statistics.reset();
+
+ if (ASMJIT_LIKELY(_impl != &JitAllocatorImpl_none)) {
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+
+ size_t poolCount = impl->poolCount;
+ for (size_t poolId = 0; poolId < poolCount; poolId++) {
+ const JitAllocatorPool& pool = impl->pools[poolId];
+ statistics._blockCount += size_t(pool.blockCount);
+ statistics._reservedSize += size_t(pool.totalAreaSize) * pool.granularity;
+ statistics._usedSize += size_t(pool.totalAreaUsed) * pool.granularity;
+ statistics._overheadSize += size_t(pool.totalOverheadBytes);
+ }
+ }
+
+ return statistics;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Alloc / Release]
+// ============================================================================
+
+Error JitAllocator::alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept {
+ if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();
+
+ *roPtrOut = nullptr;
+ *rwPtrOut = nullptr;
+
+ // Align to the minimum granularity by default.
+ size = Support::alignUp<size_t>(size, impl->granularity);
+ if (ASMJIT_UNLIKELY(size == 0))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(size > std::numeric_limits<uint32_t>::max() / 2))
+ return DebugUtils::errored(kErrorTooLarge);
+
+ LockGuard guard(impl->lock);
+ JitAllocatorPool* pool = &impl->pools[JitAllocatorImpl_sizeToPoolId(impl, size)];
+
+ uint32_t areaIndex = kNoIndex;
+ uint32_t areaSize = uint32_t(pool->areaSizeFromByteSize(size));
+
+ // Try to find the requested memory area in existing blocks.
+ JitAllocatorBlock* block = pool->blocks.first();
+ if (block) {
+ JitAllocatorBlock* initial = block;
+ do {
+ JitAllocatorBlock* next = block->hasNext() ? block->next() : pool->blocks.first();
+ if (block->areaAvailable() >= areaSize) {
+ if (block->hasFlag(JitAllocatorBlock::kFlagDirty) || block->largestUnusedArea >= areaSize) {
+ uint32_t blockAreaSize = block->areaSize;
+ uint32_t searchStart = block->searchStart;
+ uint32_t searchEnd = block->searchEnd;
+
+ BitFlipIterator<Support::BitWord> it(
+ block->usedBitVector,
+ pool->bitWordCountFromAreaSize(searchEnd),
+ searchStart,
+ Support::allOnes<Support::BitWord>());
+
+ // If there is unused area available then there has to be at least one match.
+ ASMJIT_ASSERT(it.hasNext());
+
+ uint32_t bestArea = blockAreaSize;
+ uint32_t largestArea = 0;
+ uint32_t holeIndex = uint32_t(it.peekNext());
+ uint32_t holeEnd = holeIndex;
+
+ searchStart = holeIndex;
+ do {
+ holeIndex = uint32_t(it.nextAndFlip());
+ if (holeIndex >= searchEnd) break;
+
+ holeEnd = it.hasNext() ? Support::min(searchEnd, uint32_t(it.nextAndFlip())) : searchEnd;
+ uint32_t holeSize = holeEnd - holeIndex;
+
+ if (holeSize >= areaSize && bestArea >= holeSize) {
+ largestArea = Support::max(largestArea, bestArea);
+ bestArea = holeSize;
+ areaIndex = holeIndex;
+ }
+ else {
+ largestArea = Support::max(largestArea, holeSize);
+ }
+ } while (it.hasNext());
+ searchEnd = holeEnd;
+
+ // Because we have traversed the entire block, we can now mark the
+ // largest unused area that can be used to cache the next traversal.
+ block->searchStart = searchStart;
+ block->searchEnd = searchEnd;
+ block->largestUnusedArea = largestArea;
+ block->clearFlags(JitAllocatorBlock::kFlagDirty);
+
+ if (areaIndex != kNoIndex) {
+ if (searchStart == areaIndex)
+ block->searchStart += areaSize;
+ break;
+ }
+ }
+ }
+
+ block = next;
+ } while (block != initial);
+ }
+
+  // Allocate a new block if there is no region of the required size.
+ if (areaIndex == kNoIndex) {
+ size_t blockSize = JitAllocatorImpl_calculateIdealBlockSize(impl, pool, size);
+ if (ASMJIT_UNLIKELY(!blockSize))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ block = JitAllocatorImpl_newBlock(impl, pool, blockSize);
+
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ JitAllocatorImpl_insertBlock(impl, block);
+ areaIndex = 0;
+ block->searchStart = areaSize;
+ block->largestUnusedArea = block->areaSize - areaSize;
+ }
+
+ // Update statistics.
+ block->increaseUsedArea(areaSize);
+
+ // Handle special cases.
+ if (block->hasFlag(JitAllocatorBlock::kFlagEmpty)) {
+ pool->emptyBlockCount--;
+ block->clearFlags(JitAllocatorBlock::kFlagEmpty);
+ }
+
+ if (block->areaAvailable() == 0) {
+ // The whole block is filled.
+ block->searchStart = block->areaSize;
+ block->searchEnd = 0;
+ block->largestUnusedArea = 0;
+ block->clearFlags(JitAllocatorBlock::kFlagDirty);
+ }
+
+ // Mark the newly allocated space as occupied and also the sentinel.
+ Support::bitVectorFill(block->usedBitVector, areaIndex, areaSize);
+ Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaSize - 1, true);
+
+ // Return a pointer to the allocated memory.
+ size_t offset = pool->byteSizeFromAreaSize(areaIndex);
+ ASMJIT_ASSERT(offset <= block->blockSize - size);
+
+ *roPtrOut = block->roPtr() + offset;
+ *rwPtrOut = block->rwPtr() + offset;
+ return kErrorOk;
+}
+
+Error JitAllocator::release(void* ro) noexcept {
+ if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!ro))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+
+ JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(ro));
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // Offset relative to the start of the block.
+ JitAllocatorPool* pool = block->pool;
+ size_t offset = (size_t)((uint8_t*)ro - block->roPtr());
+
+  // Locate the first bit of the allocated area and compute the area size.
+ uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
+ uint32_t areaLast = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true));
+ uint32_t areaSize = areaLast - areaIndex + 1;
+
+ // Update the search region and statistics.
+ block->searchStart = Support::min(block->searchStart, areaIndex);
+ block->searchEnd = Support::max(block->searchEnd, areaLast + 1);
+ block->addFlags(JitAllocatorBlock::kFlagDirty);
+ block->decreaseUsedArea(areaSize);
+
+ // Clear all occupied bits and also the sentinel.
+ Support::bitVectorClear(block->usedBitVector, areaIndex, areaSize);
+ Support::bitVectorSetBit(block->stopBitVector, areaLast, false);
+
+ // Fill the released memory if the secure mode is enabled.
+ if (impl->options & kOptionFillUnusedMemory)
+ JitAllocatorImpl_fillPattern(block->rwPtr() + areaIndex * pool->granularity, impl->fillPattern, areaSize * pool->granularity);
+
+ // Release the whole block if it became empty.
+ if (block->areaUsed == 0) {
+ if (pool->emptyBlockCount || (impl->options & kOptionImmediateRelease)) {
+ JitAllocatorImpl_removeBlock(impl, block);
+ JitAllocatorImpl_deleteBlock(impl, block);
+ }
+ else {
+ pool->emptyBlockCount++;
+ block->largestUnusedArea = areaSize;
+ block->searchStart = 0;
+ block->searchEnd = areaSize;
+ block->addFlags(JitAllocatorBlock::kFlagEmpty);
+ block->clearFlags(JitAllocatorBlock::kFlagDirty);
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error JitAllocator::shrink(void* ro, size_t newSize) noexcept {
+ if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ if (ASMJIT_UNLIKELY(!ro))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ if (ASMJIT_UNLIKELY(newSize == 0))
+ return release(ro);
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+ JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(ro));
+
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Offset relative to the start of the block.
+ JitAllocatorPool* pool = block->pool;
+ size_t offset = (size_t)((uint8_t*)ro - block->roPtr());
+
+  // Locate the first bit of the allocated area and compute the old area size.
+ uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
+ uint32_t areaOldSize = uint32_t(Support::bitVectorIndexOf(block->stopBitVector, areaIndex, true)) + 1 - areaIndex;
+ uint32_t areaNewSize = pool->areaSizeFromByteSize(newSize);
+
+ if (ASMJIT_UNLIKELY(areaNewSize > areaOldSize))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t areaDiff = areaOldSize - areaNewSize;
+ if (!areaDiff)
+ return kErrorOk;
+
+ // Update the search region and statistics.
+ block->searchStart = Support::min(block->searchStart, areaIndex + areaNewSize);
+ block->searchEnd = Support::max(block->searchEnd, areaIndex + areaOldSize);
+ block->addFlags(JitAllocatorBlock::kFlagDirty);
+ block->decreaseUsedArea(areaDiff);
+
+ // Unmark the released space and move the sentinel.
+ Support::bitVectorClear(block->usedBitVector, areaIndex + areaNewSize, areaDiff);
+ Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaOldSize - 1, false);
+ Support::bitVectorSetBit(block->stopBitVector, areaIndex + areaNewSize - 1, true);
+
+ // Fill released memory if the secure mode is enabled.
+ if (impl->options & kOptionFillUnusedMemory)
+ JitAllocatorImpl_fillPattern(
+ block->rwPtr() + (areaIndex + areaOldSize) * pool->granularity,
+ fillPattern(),
+ areaDiff * pool->granularity);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::JitAllocator - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+// A pseudo random number generator based on a paper by Sebastiano Vigna:
+// http://vigna.di.unimi.it/ftp/papers/xorshiftplus.pdf
+class Random {
+public:
+ // Constants suggested as `23/18/5`.
+ enum Steps : uint32_t {
+ kStep1_SHL = 23,
+ kStep2_SHR = 18,
+ kStep3_SHR = 5
+ };
+
+ inline explicit Random(uint64_t seed = 0) noexcept { reset(seed); }
+ inline Random(const Random& other) noexcept = default;
+
+ inline void reset(uint64_t seed = 0) noexcept {
+ // The number is arbitrary, it means nothing.
+ constexpr uint64_t kZeroSeed = 0x1F0A2BE71D163FA0u;
+
+ // Generate the state data by using splitmix64.
+ for (uint32_t i = 0; i < 2; i++) {
+ seed += 0x9E3779B97F4A7C15u;
+ uint64_t x = seed;
+ x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9u;
+ x = (x ^ (x >> 27)) * 0x94D049BB133111EBu;
+ x = (x ^ (x >> 31));
+ _state[i] = x != 0 ? x : kZeroSeed;
+ }
+ }
+
+ inline uint32_t nextUInt32() noexcept {
+ return uint32_t(nextUInt64() >> 32);
+ }
+
+ inline uint64_t nextUInt64() noexcept {
+ uint64_t x = _state[0];
+ uint64_t y = _state[1];
+
+ x ^= x << kStep1_SHL;
+ y ^= y >> kStep3_SHR;
+ x ^= x >> kStep2_SHR;
+ x ^= y;
+
+ _state[0] = y;
+ _state[1] = x;
+ return x + y;
+ }
+
+ uint64_t _state[2];
+};
+
+// Helper class to verify that JitAllocator doesn't return addresses that overlap.
+class JitAllocatorWrapper {
+public:
+ explicit inline JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
+ : _zone(1024 * 1024),
+ _heap(&_zone),
+ _allocator(params) {}
+
+ // Address to a memory region of a given size.
+ class Range {
+ public:
+ inline Range(uint8_t* addr, size_t size) noexcept
+ : addr(addr),
+ size(size) {}
+ uint8_t* addr;
+ size_t size;
+ };
+
+ // Based on JitAllocator::Block, serves our purpose well...
+ class Record : public ZoneTreeNodeT<Record>,
+ public Range {
+ public:
+ inline Record(uint8_t* addr, size_t size)
+ : ZoneTreeNodeT<Record>(),
+ Range(addr, size) {}
+
+ inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
+ inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }
+
+ inline bool operator<(const uint8_t* key) const noexcept { return addr + size <= key; }
+ inline bool operator>(const uint8_t* key) const noexcept { return addr > key; }
+ };
+
+ void _insert(void* p_, size_t size) noexcept {
+ uint8_t* p = static_cast<uint8_t*>(p_);
+ uint8_t* pEnd = p + size - 1;
+
+ Record* record;
+
+ record = _records.get(p);
+ if (record)
+ EXPECT(record == nullptr,
+ "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
+
+ record = _records.get(pEnd);
+ if (record)
+ EXPECT(record == nullptr,
+ "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
+
+ record = _heap.newT<Record>(p, size);
+ EXPECT(record != nullptr,
+ "Out of memory, cannot allocate 'Record'");
+
+ _records.insert(record);
+ }
+
+ void _remove(void* p) noexcept {
+ Record* record = _records.get(static_cast<uint8_t*>(p));
+ EXPECT(record != nullptr,
+ "Address [%p] doesn't exist\n", p);
+
+ _records.remove(record);
+ _heap.release(record, sizeof(Record));
+ }
+
+ void* alloc(size_t size) noexcept {
+ void* roPtr;
+ void* rwPtr;
+
+ Error err = _allocator.alloc(&roPtr, &rwPtr, size);
+ EXPECT(err == kErrorOk,
+ "JitAllocator failed to allocate '%u' bytes\n", unsigned(size));
+
+ _insert(roPtr, size);
+ return roPtr;
+ }
+
+ void release(void* p) noexcept {
+ _remove(p);
+ EXPECT(_allocator.release(p) == kErrorOk,
+ "JitAllocator failed to release '%p'\n", p);
+ }
+
+ Zone _zone;
+ ZoneAllocator _heap;
+ ZoneTree<Record> _records;
+ JitAllocator _allocator;
+};
+
+static void JitAllocatorTest_shuffle(void** ptrArray, size_t count, Random& prng) noexcept {
+ for (size_t i = 0; i < count; ++i)
+ std::swap(ptrArray[i], ptrArray[size_t(prng.nextUInt32() % count)]);
+}
+
+static void JitAllocatorTest_usage(JitAllocator& allocator) noexcept {
+ JitAllocator::Statistics stats = allocator.statistics();
+ INFO(" Block Count : %9llu [Blocks]" , (unsigned long long)(stats.blockCount()));
+ INFO(" Reserved (VirtMem): %9llu [Bytes]" , (unsigned long long)(stats.reservedSize()));
+ INFO(" Used (VirtMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.usedSize()), stats.usedSizeAsPercent());
+ INFO(" Overhead (HeapMem): %9llu [Bytes] (%.1f%%)", (unsigned long long)(stats.overheadSize()), stats.overheadSizeAsPercent());
+}
+
+UNIT(jit_allocator) {
+ size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000;
+
+ struct TestParams {
+ const char* name;
+ uint32_t options;
+ uint32_t blockSize;
+ uint32_t granularity;
+ };
+
+ #define OPT(OPTION) JitAllocator::OPTION
+ static TestParams testParams[] = {
+ { "Default", 0, 0, 0 },
+ { "16MB blocks", 0, 16 * 1024 * 1024, 0 },
+ { "256B granularity", 0, 0, 256 },
+ { "kOptionUseDualMapping", OPT(kOptionUseDualMapping), 0, 0 },
+ { "kOptionUseMultiplePools", OPT(kOptionUseMultiplePools), 0, 0 },
+ { "kOptionFillUnusedMemory", OPT(kOptionFillUnusedMemory), 0, 0 },
+ { "kOptionImmediateRelease", OPT(kOptionImmediateRelease), 0, 0 },
+ { "kOptionUseDualMapping | kOptionFillUnusedMemory", OPT(kOptionUseDualMapping) | OPT(kOptionFillUnusedMemory), 0, 0 }
+ };
+ #undef OPT
+
+ INFO("BitFlipIterator<uint32_t>");
+ {
+ static const uint32_t bits[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ BitFlipIterator<uint32_t> it(bits, ASMJIT_ARRAY_SIZE(bits));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 32);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 64);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 127);
+ EXPECT(!it.hasNext());
+ }
+
+ INFO("BitFlipIterator<uint64_t>");
+ {
+    static const uint64_t bits[] = { 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu, 0u, 0u };
+ BitFlipIterator<uint64_t> it(bits, ASMJIT_ARRAY_SIZE(bits));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 0);
+ EXPECT(it.hasNext());
+ EXPECT(it.nextAndFlip() == 128);
+ EXPECT(!it.hasNext());
+ }
+
+ for (uint32_t testId = 0; testId < ASMJIT_ARRAY_SIZE(testParams); testId++) {
+ INFO("Testing JitAllocator: %s", testParams[testId].name);
+
+ JitAllocator::CreateParams params {};
+ params.options = testParams[testId].options;
+ params.blockSize = testParams[testId].blockSize;
+ params.granularity = testParams[testId].granularity;
+
+ JitAllocatorWrapper wrapper(&params);
+ Random prng(100);
+
+ size_t i;
+
+ INFO(" Memory alloc/release test - %d allocations", kCount);
+
+ void** ptrArray = (void**)::malloc(sizeof(void*) * size_t(kCount));
+ EXPECT(ptrArray != nullptr,
+ "Couldn't allocate '%u' bytes for pointer-array", unsigned(sizeof(void*) * size_t(kCount)));
+
+ INFO(" Allocating virtual memory...");
+ for (i = 0; i < kCount; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Releasing virtual memory...");
+ for (i = 0; i < kCount; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Allocating virtual memory...", kCount);
+ for (i = 0; i < kCount; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Shuffling...");
+ JitAllocatorTest_shuffle(ptrArray, unsigned(kCount), prng);
+
+ INFO(" Releasing 50%% blocks...");
+ for (i = 0; i < kCount / 2; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Allocating 50%% blocks again...");
+ for (i = 0; i < kCount / 2; i++)
+ ptrArray[i] = wrapper.alloc((prng.nextUInt32() % 1024) + 8);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ INFO(" Releasing virtual memory...");
+ for (i = 0; i < kCount; i++)
+ wrapper.release(ptrArray[i]);
+ JitAllocatorTest_usage(wrapper._allocator);
+
+ ::free(ptrArray);
+ }
+}
+#endif
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/jitallocator.h b/3rdparty/asmjit/src/asmjit/core/jitallocator.h
new file mode 100644
index 00000000000..111716ed59e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/jitallocator.h
@@ -0,0 +1,278 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
+#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/globals.h"
+#include "../core/virtmem.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_jit
+//! \{
+
+// ============================================================================
+// [asmjit::JitAllocator]
+// ============================================================================
+
+//! A simple implementation of a memory manager that uses `asmjit::VirtMem`
+//! functions to manage virtual memory for JIT compiled code.
+//!
+//! Implementation notes:
+//!
+//!   - Granularity of allocated blocks is different from the granularity of a
+//!     typical C `malloc()`. In addition, the allocator can use several memory
+//!     pools having different granularities to minimize the maintenance
+//!     overhead. The multiple-pools feature requires the
+//!     `kOptionUseMultiplePools` flag to be set.
+//!
+//! - The allocator doesn't store any information in executable memory, instead,
+//! the implementation uses two bit-vectors to manage allocated memory of each
+//! allocator-block. The first bit-vector called 'used' is used to track used
+//! memory (where each bit represents memory size defined by granularity) and
+//! the second bit vector called 'stop' is used as a sentinel to mark where
+//! the allocated area ends.
+//!
+//!   - Internally, the allocator also uses a red-black tree to keep track of
+//!     all blocks across all pools. Each inserted block is added to the tree
+//!     so it can be matched quickly during `release()` and `shrink()`.
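+//!
+//! A minimal usage sketch (assuming `code` points to machine code of
+//! `codeSize` bytes produced elsewhere, e.g. copied from a CodeHolder):
+//!
+//! ```
+//! JitAllocator allocator;
+//!
+//! void* roPtr;
+//! void* rwPtr;
+//! if (allocator.alloc(&roPtr, &rwPtr, codeSize) != kErrorOk)
+//!   return;
+//!
+//! // Copy the code through the read+write mapping and execute it through the
+//! // read+execute mapping (both point to the same memory unless dual mapping
+//! // is enabled).
+//! memcpy(rwPtr, code, codeSize);
+//!
+//! // ... execute via roPtr ...
+//!
+//! allocator.release(roPtr);
+//! ```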
+class JitAllocator {
+public:
+ ASMJIT_NONCOPYABLE(JitAllocator)
+
+ struct Impl {
+ //! Allocator options, see \ref JitAllocator::Options.
+ uint32_t options;
+ //! Base block size (0 if the allocator is not initialized).
+ uint32_t blockSize;
+ //! Base granularity (0 if the allocator is not initialized).
+ uint32_t granularity;
+ //! A pattern that is used to fill unused memory if secure mode is enabled.
+ uint32_t fillPattern;
+ };
+
+ //! Allocator implementation (private).
+ Impl* _impl;
+
+ enum Options : uint32_t {
+    //! Enables the use of anonymous memory-mapped memory that is mapped into
+    //! two buffers having different pointers. The first buffer has read and
+    //! execute permissions and the second buffer has read+write permissions.
+ //!
+ //! See \ref VirtMem::allocDualMapping() for more details about this feature.
+ kOptionUseDualMapping = 0x00000001u,
+
+ //! Enables the use of multiple pools with increasing granularity instead of
+ //! a single pool. This flag would enable 3 internal pools in total having
+ //! 64, 128, and 256 bytes granularity.
+ //!
+    //! This feature is only recommended for users that generate a lot of code
+    //! and would like to minimize the overhead of `JitAllocator` itself by
+    //! having blocks of different allocation granularities. Using this feature
+    //! only for a few allocations won't pay off as the allocator may need to
+    //! create more blocks initially before it can take advantage of variable
+    //! block granularity.
+ kOptionUseMultiplePools = 0x00000002u,
+
+ //! Always fill reserved memory by a fill-pattern.
+ //!
+ //! Causes a new block to be cleared by the fill pattern and freshly
+ //! released memory to be cleared before making it ready for another use.
+ kOptionFillUnusedMemory = 0x00000004u,
+
+    //! When this flag is set the allocator immediately releases unused blocks
+    //! during `release()` or `reset()`. When this flag is not set the
+    //! allocator keeps one empty block in each pool to prevent excessive
+    //! virtual memory allocations and deallocations in border cases, which
+    //! involve constantly allocating and deallocating a single block caused
+    //! by repetitively calling `alloc()` and `release()` when the allocator
+    //! has either no blocks or has all blocks fully occupied.
+ kOptionImmediateRelease = 0x00000008u,
+
+    //! Use a custom fill pattern, must be combined with `kOptionFillUnusedMemory`.
+ kOptionCustomFillPattern = 0x10000000u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Parameters that can be passed to `JitAllocator` constructor.
+ //!
+ //! Use it like this:
+ //!
+ //! ```
+ //! // Zero initialize (zero means the default value) and change what you need.
+ //! JitAllocator::CreateParams params {};
+ //! params.blockSize = 1024 * 1024;
+ //!
+ //! // Create the allocator.
+ //! JitAllocator allocator(&params);
+ //! ```
+ struct CreateParams {
+    //! Resets the content of `CreateParams`.
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ //! Allocator options, see \ref JitAllocator::Options.
+ //!
+ //! No options are used by default.
+ uint32_t options;
+
+ //! Base size of a single block in bytes (default 64kB).
+ //!
+    //! \remarks Block size must be equal to or greater than the page size and
+    //! must be a power of 2. If the input is not valid then the default block
+    //! size will be used instead.
+ uint32_t blockSize;
+
+ //! Base granularity (and also natural alignment) of allocations in bytes
+ //! (default 64).
+ //!
+    //! Since `JitAllocator` uses bit-arrays to mark used memory, the
+    //! granularity also specifies how many bytes correspond to a single bit
+    //! in such a bit-array. Higher granularity means more waste of virtual
+    //! memory (as it increases the natural alignment), but smaller bit-arrays,
+    //! as fewer bits would be required per block.
+ uint32_t granularity;
+
+    //! Pattern to use to fill unused memory.
+ //!
+ //! Only used if \ref kOptionCustomFillPattern is set.
+ uint32_t fillPattern;
+ };
+
+ //! Creates a `JitAllocator` instance.
+ explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
+  //! Destroys the `JitAllocator` instance and releases all blocks held.
+ ASMJIT_API ~JitAllocator() noexcept;
+
+  //! Tests whether the allocator is initialized (`blockSize` is non-zero only
+  //! when the allocator was created successfully).
+  inline bool isInitialized() const noexcept { return _impl->blockSize != 0; }
+
+  //! Frees all allocated memory - makes all pointers returned by `alloc()`
+  //! invalid.
+  //!
+  //! \remarks This function is not thread-safe as it's designed to be used
+  //! when nobody else is using the allocator. The reason is that there is no
+  //! point in calling `reset()` when the allocator is still in use.
+ ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+  //! Returns allocator options, see `Options`.
+ inline uint32_t options() const noexcept { return _impl->options; }
+ //! Tests whether the allocator has the given `option` set.
+ inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; }
+
+  //! Returns the base block size (the minimum size of a block the allocator would allocate).
+ inline uint32_t blockSize() const noexcept { return _impl->blockSize; }
+ //! Returns granularity of the allocator.
+ inline uint32_t granularity() const noexcept { return _impl->granularity; }
+  //! Returns the pattern that is used to fill unused memory if `kOptionFillUnusedMemory` is set.
+ inline uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
+
+ //! \}
+
+ //! \name Alloc & Release
+ //! \{
+
+  //! Allocates `size` bytes of virtual memory.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept;
+
+  //! Releases memory returned by `alloc()`.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error release(void* ro) noexcept;
+
+  //! Frees extra memory allocated with `ro` by shrinking the allocation to `newSize` bytes.
+ //!
+ //! \remarks This function is thread-safe.
+ ASMJIT_API Error shrink(void* ro, size_t newSize) noexcept;
+
+ //! \}
+
+ //! \name Statistics
+ //! \{
+
+ //! Statistics about `JitAllocator`.
+ struct Statistics {
+ inline void reset() noexcept {
+ _blockCount = 0;
+ _usedSize = 0;
+ _reservedSize = 0;
+ _overheadSize = 0;
+ }
+
+ //! Returns count of blocks managed by `JitAllocator` at the moment.
+ inline size_t blockCount() const noexcept { return _blockCount; }
+
+ //! Returns how many bytes are currently used.
+ inline size_t usedSize() const noexcept { return _usedSize; }
+ //! Returns the number of bytes unused by the allocator at the moment.
+ inline size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
+ //! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
+ inline size_t reservedSize() const noexcept { return _reservedSize; }
+ //! Returns the number of bytes the allocator needs to manage the allocated memory.
+ inline size_t overheadSize() const noexcept { return _overheadSize; }
+
+ inline double usedSizeAsPercent() const noexcept {
+ return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+
+ inline double unusedSizeAsPercent() const noexcept {
+ return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+
+ inline double overheadSizeAsPercent() const noexcept {
+ return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
+ }
+
+ //! Number of blocks `JitAllocator` maintains.
+ size_t _blockCount;
+ //! How many bytes are currently used / allocated.
+ size_t _usedSize;
+ //! How many bytes are currently reserved by the allocator.
+ size_t _reservedSize;
+ //! Allocation overhead (in bytes) required to maintain all blocks.
+ size_t _overheadSize;
+ };
+
+ //! Returns JIT allocator statistics.
+ //!
+ //! \remarks This function is thread-safe.
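+ //!
+ //! A short sketch of inspecting the counters:
+ //!
+ //! ```
+ //! JitAllocator::Statistics stats = allocator.statistics();
+ //! printf("Used %zu of %zu reserved bytes (%.1f%%)\n",
+ //!        stats.usedSize(), stats.reservedSize(), stats.usedSizeAsPercent());
+ //! ```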
+ ASMJIT_API Statistics statistics() const noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/jitruntime.cpp b/3rdparty/asmjit/src/asmjit/core/jitruntime.cpp
new file mode 100644
index 00000000000..625cc3d2f51
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/jitruntime.cpp
@@ -0,0 +1,156 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/cpuinfo.h"
+#include "../core/jitruntime.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::JitRuntime - Utilities]
+// ============================================================================
+
+// Only useful on non-x86 architectures.
+static inline void JitRuntime_flushInstructionCache(const void* p, size_t size) noexcept {
+#if defined(_WIN32) && !ASMJIT_ARCH_X86
+ // Windows has built-in support in `kernel32.dll`.
+ ::FlushInstructionCache(::GetCurrentProcess(), p, size);
+#else
+ DebugUtils::unused(p, size);
+#endif
+}
+
+// X86 Target
+// ----------
+//
+// - 32-bit - Linux, OSX, BSD, and apparently also Haiku guarantee 16-byte
+// stack alignment. Other operating systems are assumed to have
+// 4-byte alignment by default for safety reasons.
+// - 64-bit - stack must be aligned to 16 bytes.
+//
+// ARM Target
+// ----------
+//
+// - 32-bit - Stack must be aligned to 8 bytes.
+// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
+static inline uint32_t JitRuntime_detectNaturalStackAlignment() noexcept {
+#if ASMJIT_ARCH_BITS == 64 || \
+ defined(__APPLE__ ) || \
+ defined(__DragonFly__) || \
+ defined(__HAIKU__ ) || \
+ defined(__FreeBSD__ ) || \
+ defined(__NetBSD__ ) || \
+ defined(__OpenBSD__ ) || \
+ defined(__bsdi__ ) || \
+ defined(__linux__ )
+ return 16;
+#elif ASMJIT_ARCH_ARM
+ return 8;
+#else
+ return uint32_t(sizeof(uintptr_t));
+#endif
+}
+
+// ============================================================================
+// [asmjit::JitRuntime - Construction / Destruction]
+// ============================================================================
+
+JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
+ : _allocator(params) {
+
+ // Set up target properties.
+ _targetType = kTargetJit;
+ _codeInfo._archInfo = CpuInfo::host().archInfo();
+ _codeInfo._stackAlignment = uint8_t(JitRuntime_detectNaturalStackAlignment());
+ _codeInfo._cdeclCallConv = CallConv::kIdHostCDecl;
+ _codeInfo._stdCallConv = CallConv::kIdHostStdCall;
+ _codeInfo._fastCallConv = CallConv::kIdHostFastCall;
+}
+JitRuntime::~JitRuntime() noexcept {}
+
+// ============================================================================
+// [asmjit::JitRuntime - Interface]
+// ============================================================================
+
+Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
+ *dst = nullptr;
+
+ ASMJIT_PROPAGATE(code->flatten());
+ ASMJIT_PROPAGATE(code->resolveUnresolvedLinks());
+
+ size_t estimatedCodeSize = code->codeSize();
+ if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
+ return DebugUtils::errored(kErrorNoCodeGenerated);
+
+ uint8_t* ro;
+ uint8_t* rw;
+ ASMJIT_PROPAGATE(_allocator.alloc((void**)&ro, (void**)&rw, estimatedCodeSize));
+
+ // Relocate the code.
+ Error err = code->relocateToBase(uintptr_t((void*)ro));
+ if (ASMJIT_UNLIKELY(err)) {
+ _allocator.release(ro);
+ return err;
+ }
+
+ // Recalculate the final code size and shrink the memory allocated for it
+ // in case some relocations didn't require records in the address table.
+ size_t codeSize = code->codeSize();
+
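+ // Copy each section into the read-write mapping and zero the gap between
+ // its buffer size and its virtual size (zero-initialized data).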
+ for (Section* section : code->_sections) {
+ size_t offset = size_t(section->offset());
+ size_t bufferSize = size_t(section->bufferSize());
+ size_t virtualSize = size_t(section->virtualSize());
+
+ ASMJIT_ASSERT(offset + bufferSize <= codeSize);
+ memcpy(rw + offset, section->data(), bufferSize);
+
+ if (virtualSize > bufferSize) {
+ ASMJIT_ASSERT(offset + virtualSize <= codeSize);
+ memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
+ }
+ }
+
+ if (codeSize < estimatedCodeSize)
+ _allocator.shrink(ro, codeSize);
+
+ flush(ro, codeSize);
+ *dst = ro;
+
+ return kErrorOk;
+}
+
+Error JitRuntime::_release(void* p) noexcept {
+ return _allocator.release(p);
+}
+
+void JitRuntime::flush(const void* p, size_t size) noexcept {
+ JitRuntime_flushInstructionCache(p, size);
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/jitruntime.h b/3rdparty/asmjit/src/asmjit/core/jitruntime.h
new file mode 100644
index 00000000000..97f26e7a6c1
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/jitruntime.h
@@ -0,0 +1,126 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
+#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/codeholder.h"
+#include "../core/jitallocator.h"
+#include "../core/target.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+class CodeHolder;
+
+//! \addtogroup asmjit_jit
+//! \{
+
+// ============================================================================
+// [asmjit::JitRuntime]
+// ============================================================================
+
+//! JIT execution runtime is a special `Target` that is designed to store and
+//! execute the generated code.
+class ASMJIT_VIRTAPI JitRuntime : public Target {
+public:
+ ASMJIT_NONCOPYABLE(JitRuntime)
+
+ //! Virtual memory allocator.
+ JitAllocator _allocator;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a `JitRuntime` instance.
+ explicit ASMJIT_API JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
+ //! Destroys the `JitRuntime` instance.
+ ASMJIT_API virtual ~JitRuntime() noexcept;
+
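+ //! Resets the runtime's allocator, see `JitAllocator::reset()`.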
+ inline void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept {
+ _allocator.reset(resetPolicy);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated `JitAllocator`.
+ inline JitAllocator* allocator() const noexcept { return const_cast<JitAllocator*>(&_allocator); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ // NOTE: To allow passing function pointers to `add()` and `release()` the
+ // virtual methods are prefixed with `_` and called from templates instead.
+
+ //! Allocates memory needed for the code stored in `CodeHolder` and
+ //! relocates the code to the allocated memory.
+ //!
+ //! The beginning of the memory allocated for the function is returned in
+ //! `dst`. On failure an `Error` code is returned and `dst` is explicitly
+ //! set to `nullptr` (this means that you don't have to set it to null
+ //! before calling `add()`).
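+ //!
+ //! A minimal sketch (assumes a `JitRuntime rt` and a `CodeHolder code`
+ //! that already holds generated code and is attached to this runtime):
+ //!
+ //! ```
+ //! typedef int (*SumFunc)(int, int);
+ //!
+ //! SumFunc fn;
+ //! Error err = rt.add(&fn, &code);
+ //!
+ //! if (err == kErrorOk) {
+ //!   int sum = fn(1, 2); // sum == 3
+ //!   rt.release(fn);
+ //! }
+ //! ```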
+ template<typename Func>
+ inline Error add(Func* dst, CodeHolder* code) noexcept {
+ return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
+ }
+
+ //! Releases `p` which was obtained by calling `add()`.
+ template<typename Func>
+ inline Error release(Func p) noexcept {
+ return _release(Support::ptr_cast_impl<void*, Func>(p));
+ }
+
+ //! Type-unsafe version of `add()`.
+ ASMJIT_API virtual Error _add(void** dst, CodeHolder* code) noexcept;
+
+ //! Type-unsafe version of `release()`.
+ ASMJIT_API virtual Error _release(void* p) noexcept;
+
+ //! Flushes an instruction cache.
+ //!
+ //! This member function is called after the code has been copied to the
+ //! destination buffer. It is only useful for JIT code generation as it
+ //! causes a flush of the processor's cache.
+ //!
+ //! Flushing is basically a no-op on X86, but is needed on architectures
+ //! that do not have a transparent instruction cache, such as ARM.
+ //!
+ //! This function can also be overridden to improve compatibility with tools
+ //! such as Valgrind; however, doing so is not an official part of AsmJit.
+ ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/logging.cpp b/3rdparty/asmjit/src/asmjit/core/logging.cpp
new file mode 100644
index 00000000000..7e10af27c2d
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/logging.cpp
@@ -0,0 +1,535 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_LOGGING
+
+#include "../core/builder.h"
+#include "../core/codeholder.h"
+#include "../core/compiler.h"
+#include "../core/emitter.h"
+#include "../core/logging.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/type.h"
+
+#ifdef ASMJIT_BUILD_X86
+ #include "../x86/x86logging_p.h"
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ #include "../arm/armlogging_p.h"
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+#if defined(ASMJIT_NO_COMPILER)
+class VirtReg;
+#endif
+
+// ============================================================================
+// [asmjit::Logger - Construction / Destruction]
+// ============================================================================
+
+Logger::Logger() noexcept
+ : _options() {}
+Logger::~Logger() noexcept {}
+
+// ============================================================================
+// [asmjit::Logger - Logging]
+// ============================================================================
+
+Error Logger::logf(const char* fmt, ...) noexcept {
+ Error err;
+ va_list ap;
+
+ va_start(ap, fmt);
+ err = logv(fmt, ap);
+ va_end(ap);
+
+ return err;
+}
+
+Error Logger::logv(const char* fmt, va_list ap) noexcept {
+ StringTmp<2048> sb;
+ ASMJIT_PROPAGATE(sb.appendVFormat(fmt, ap));
+ return log(sb);
+}
+
+Error Logger::logBinary(const void* data, size_t size) noexcept {
+ static const char prefix[] = "db ";
+
+ StringTmp<256> sb;
+ sb.appendString(prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);
+
+ size_t i = size;
+ const uint8_t* s = static_cast<const uint8_t*>(data);
+
+ while (i) {
+ uint32_t n = uint32_t(Support::min<size_t>(i, 16));
+ sb.truncate(ASMJIT_ARRAY_SIZE(prefix) - 1);
+ sb.appendHex(s, n);
+ sb.appendChar('\n');
+ ASMJIT_PROPAGATE(log(sb));
+ s += n;
+ i -= n;
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::FileLogger - Construction / Destruction]
+// ============================================================================
+
+FileLogger::FileLogger(FILE* file) noexcept
+ : _file(nullptr) { setFile(file); }
+FileLogger::~FileLogger() noexcept {}
+
+// ============================================================================
+// [asmjit::FileLogger - Logging]
+// ============================================================================
+
+Error FileLogger::_log(const char* data, size_t size) noexcept {
+ if (!_file)
+ return kErrorOk;
+
+ if (size == SIZE_MAX)
+ size = strlen(data);
+
+ fwrite(data, 1, size, _file);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::StringLogger - Construction / Destruction]
+// ============================================================================
+
+StringLogger::StringLogger() noexcept {}
+StringLogger::~StringLogger() noexcept {}
+
+// ============================================================================
+// [asmjit::StringLogger - Logging]
+// ============================================================================
+
+Error StringLogger::_log(const char* data, size_t size) noexcept {
+ return _content.appendString(data, size);
+}
+
+// ============================================================================
+// [asmjit::Logging]
+// ============================================================================
+
+Error Logging::formatLabel(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t labelId) noexcept {
+
+ DebugUtils::unused(flags);
+
+ const LabelEntry* le = emitter->code()->labelEntry(labelId);
+ if (ASMJIT_UNLIKELY(!le))
+ return sb.appendFormat("InvalidLabel[Id=%u]", labelId);
+
+ if (le->hasName()) {
+ if (le->hasParent()) {
+ uint32_t parentId = le->parentId();
+ const LabelEntry* pe = emitter->code()->labelEntry(parentId);
+
+ if (ASMJIT_UNLIKELY(!pe))
+ ASMJIT_PROPAGATE(sb.appendFormat("InvalidLabel[Id=%u]", labelId));
+ else if (ASMJIT_UNLIKELY(!pe->hasName()))
+ ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
+ else
+ ASMJIT_PROPAGATE(sb.appendString(pe->name()));
+
+ ASMJIT_PROPAGATE(sb.appendChar('.'));
+ }
+ return sb.appendString(le->name());
+ }
+ else {
+ return sb.appendFormat("L%u", labelId);
+ }
+}
+
+Error Logging::formatRegister(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ uint32_t regType,
+ uint32_t regId) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::LoggingInternal::formatRegister(sb, flags, emitter, archId, regType, regId);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::LoggingInternal::formatRegister(sb, flags, emitter, archId, regType, regId);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error Logging::formatOperand(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const Operand_& op) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::LoggingInternal::formatOperand(sb, flags, emitter, archId, op);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::LoggingInternal::formatOperand(sb, flags, emitter, archId, op);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error Logging::formatInstruction(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept {
+
+#ifdef ASMJIT_BUILD_X86
+ if (ArchInfo::isX86Family(archId))
+ return x86::LoggingInternal::formatInstruction(sb, flags, emitter, archId, inst, operands, opCount);
+#endif
+
+#ifdef ASMJIT_BUILD_ARM
+ if (ArchInfo::isArmFamily(archId))
+ return arm::LoggingInternal::formatInstruction(sb, flags, emitter, archId, inst, operands, opCount);
+#endif
+
+ return kErrorInvalidArch;
+}
+
+Error Logging::formatTypeId(String& sb, uint32_t typeId) noexcept {
+ if (typeId == Type::kIdVoid)
+ return sb.appendString("void");
+
+ if (!Type::isValid(typeId))
+ return sb.appendString("unknown");
+
+ const char* typeName = "unknown";
+ uint32_t typeSize = Type::sizeOf(typeId);
+
+ uint32_t baseId = Type::baseOf(typeId);
+ switch (baseId) {
+ case Type::kIdIntPtr : typeName = "iptr" ; break;
+ case Type::kIdUIntPtr: typeName = "uptr" ; break;
+ case Type::kIdI8 : typeName = "i8" ; break;
+ case Type::kIdU8 : typeName = "u8" ; break;
+ case Type::kIdI16 : typeName = "i16" ; break;
+ case Type::kIdU16 : typeName = "u16" ; break;
+ case Type::kIdI32 : typeName = "i32" ; break;
+ case Type::kIdU32 : typeName = "u32" ; break;
+ case Type::kIdI64 : typeName = "i64" ; break;
+ case Type::kIdU64 : typeName = "u64" ; break;
+ case Type::kIdF32 : typeName = "f32" ; break;
+ case Type::kIdF64 : typeName = "f64" ; break;
+ case Type::kIdF80 : typeName = "f80" ; break;
+ case Type::kIdMask8 : typeName = "mask8" ; break;
+ case Type::kIdMask16 : typeName = "mask16"; break;
+ case Type::kIdMask32 : typeName = "mask32"; break;
+ case Type::kIdMask64 : typeName = "mask64"; break;
+ case Type::kIdMmx32 : typeName = "mmx32" ; break;
+ case Type::kIdMmx64 : typeName = "mmx64" ; break;
+ }
+
+ uint32_t baseSize = Type::sizeOf(baseId);
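+ // Vector types are formatted as the base type followed by an element
+ // count, e.g. a 16-byte type with an `i32` base formats as "i32x4".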
+ if (typeSize > baseSize) {
+ uint32_t count = typeSize / baseSize;
+ return sb.appendFormat("%sx%u", typeName, unsigned(count));
+ }
+ else {
+ return sb.appendString(typeName);
+ }
+}
+
+#ifndef ASMJIT_NO_BUILDER
+static Error formatFuncValue(String& sb, uint32_t flags, const BaseEmitter* emitter, FuncValue value) noexcept {
+ uint32_t typeId = value.typeId();
+ ASMJIT_PROPAGATE(Logging::formatTypeId(sb, typeId));
+
+ if (value.isReg()) {
+ ASMJIT_PROPAGATE(sb.appendChar('@'));
+ ASMJIT_PROPAGATE(Logging::formatRegister(sb, flags, emitter, emitter->archId(), value.regType(), value.regId()));
+ }
+
+ if (value.isStack()) {
+ ASMJIT_PROPAGATE(sb.appendFormat("@[%d]", int(value.stackOffset())));
+ }
+
+ return kErrorOk;
+}
+
+static Error formatFuncRets(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ const FuncDetail& fd,
+ VirtReg* const* vRegs) noexcept {
+
+ if (!fd.hasRet())
+ return sb.appendString("void");
+
+ for (uint32_t i = 0; i < fd.retCount(); i++) {
+ if (i) ASMJIT_PROPAGATE(sb.appendString(", "));
+ ASMJIT_PROPAGATE(formatFuncValue(sb, flags, emitter, fd.ret(i)));
+
+#ifndef ASMJIT_NO_COMPILER
+ if (vRegs) {
+ static const char nullRet[] = "<none>";
+ ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullRet));
+ }
+#else
+ DebugUtils::unused(vRegs);
+#endif
+ }
+
+ return kErrorOk;
+}
+
+static Error formatFuncArgs(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ const FuncDetail& fd,
+ VirtReg* const* vRegs) noexcept {
+
+ uint32_t count = fd.argCount();
+ if (!count)
+ return sb.appendString("void");
+
+ for (uint32_t i = 0; i < count; i++) {
+ if (i) ASMJIT_PROPAGATE(sb.appendString(", "));
+ ASMJIT_PROPAGATE(formatFuncValue(sb, flags, emitter, fd.arg(i)));
+
+#ifndef ASMJIT_NO_COMPILER
+ if (vRegs) {
+ static const char nullArg[] = "<none>";
+ ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullArg));
+ }
+#else
+ DebugUtils::unused(vRegs);
+#endif
+ }
+
+ return kErrorOk;
+}
+
+Error Logging::formatNode(
+ String& sb,
+ uint32_t flags,
+ const BaseBuilder* cb,
+ const BaseNode* node_) noexcept {
+
+ if (node_->hasPosition() && (flags & FormatOptions::kFlagPositions) != 0)
+ ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node_->position()));
+
+ switch (node_->type()) {
+ case BaseNode::kNodeInst:
+ case BaseNode::kNodeJump: {
+ const InstNode* node = node_->as<InstNode>();
+ ASMJIT_PROPAGATE(
+ Logging::formatInstruction(sb, flags, cb,
+ cb->archId(),
+ node->baseInst(), node->operands(), node->opCount()));
+ break;
+ }
+
+ case BaseNode::kNodeSection: {
+ const SectionNode* node = node_->as<SectionNode>();
+ if (cb->_code->isSectionValid(node->id())) {
+ const Section* section = cb->_code->sectionById(node->id());
+ ASMJIT_PROPAGATE(sb.appendFormat(".section %s", section->name()));
+ }
+ break;
+ }
+
+ case BaseNode::kNodeLabel: {
+ const LabelNode* node = node_->as<LabelNode>();
+ ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id()));
+ ASMJIT_PROPAGATE(sb.appendString(":"));
+ break;
+ }
+
+ case BaseNode::kNodeAlign: {
+ const AlignNode* node = node_->as<AlignNode>();
+ ASMJIT_PROPAGATE(
+ sb.appendFormat(".align %u (%s)",
+ node->alignment(),
+ node->alignMode() == kAlignCode ? "code" : "data"));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedData: {
+ const EmbedDataNode* node = node_->as<EmbedDataNode>();
+ ASMJIT_PROPAGATE(sb.appendFormat(".embed (%u bytes)", node->size()));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedLabel: {
+ const EmbedLabelNode* node = node_->as<EmbedLabelNode>();
+ ASMJIT_PROPAGATE(sb.appendString(".label "));
+ ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id()));
+ break;
+ }
+
+ case BaseNode::kNodeEmbedLabelDelta: {
+ const EmbedLabelDeltaNode* node = node_->as<EmbedLabelDeltaNode>();
+ ASMJIT_PROPAGATE(sb.appendString(".label ("));
+ ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id()));
+ ASMJIT_PROPAGATE(sb.appendString(" - "));
+ ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->baseId()));
+ ASMJIT_PROPAGATE(sb.appendString(")"));
+ break;
+ }
+
+ case BaseNode::kNodeComment: {
+ const CommentNode* node = node_->as<CommentNode>();
+ ASMJIT_PROPAGATE(sb.appendFormat("; %s", node->inlineComment()));
+ break;
+ }
+
+ case BaseNode::kNodeSentinel: {
+ const SentinelNode* node = node_->as<SentinelNode>();
+ const char* sentinelName = nullptr;
+
+ switch (node->sentinelType()) {
+ case SentinelNode::kSentinelFuncEnd:
+ sentinelName = "[FuncEnd]";
+ break;
+
+ default:
+ sentinelName = "[Sentinel]";
+ break;
+ }
+
+ ASMJIT_PROPAGATE(sb.appendString(sentinelName));
+ break;
+ }
+
+#ifndef ASMJIT_NO_COMPILER
+ case BaseNode::kNodeFunc: {
+ const FuncNode* node = node_->as<FuncNode>();
+
+ ASMJIT_PROPAGATE(formatLabel(sb, flags, cb, node->id()));
+ ASMJIT_PROPAGATE(sb.appendString(": "));
+
+ ASMJIT_PROPAGATE(formatFuncRets(sb, flags, cb, node->detail(), nullptr));
+ ASMJIT_PROPAGATE(sb.appendString(" Func("));
+ ASMJIT_PROPAGATE(formatFuncArgs(sb, flags, cb, node->detail(), node->args()));
+ ASMJIT_PROPAGATE(sb.appendString(")"));
+ break;
+ }
+
+ case BaseNode::kNodeFuncRet: {
+ const FuncRetNode* node = node_->as<FuncRetNode>();
+ ASMJIT_PROPAGATE(sb.appendString("[FuncRet]"));
+
+ for (uint32_t i = 0; i < 2; i++) {
+ const Operand_& op = node->_opArray[i];
+ if (!op.isNone()) {
+ ASMJIT_PROPAGATE(sb.appendString(i == 0 ? " " : ", "));
+ ASMJIT_PROPAGATE(formatOperand(sb, flags, cb, cb->archId(), op));
+ }
+ }
+ break;
+ }
+
+ case BaseNode::kNodeFuncCall: {
+ const FuncCallNode* node = node_->as<FuncCallNode>();
+ ASMJIT_PROPAGATE(
+ Logging::formatInstruction(sb, flags, cb,
+ cb->archId(),
+ node->baseInst(), node->operands(), node->opCount()));
+ break;
+ }
+#endif
+
+ default: {
+ ASMJIT_PROPAGATE(sb.appendFormat("[User:%u]", node_->type()));
+ break;
+ }
+ }
+
+ return kErrorOk;
+}
+#endif
+
+Error Logging::formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
+ size_t currentSize = sb.size();
+ size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
+
+ ASMJIT_ASSERT(binSize >= dispSize);
+ const size_t kNoBinSize = std::numeric_limits<size_t>::max();
+
+ if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
+ size_t align = kMaxInstLineSize;
+ char sep = ';';
+
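+ // Pass 0 appends the encoded bytes, pass 1 appends the comment; when there
+ // is no binary data the loop starts directly at the comment pass.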
+ for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
+ size_t begin = sb.size();
+ ASMJIT_PROPAGATE(sb.padEnd(align));
+
+ if (sep) {
+ ASMJIT_PROPAGATE(sb.appendChar(sep));
+ ASMJIT_PROPAGATE(sb.appendChar(' '));
+ }
+
+ // Append binary data or comment.
+ if (i == 0) {
+ ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
+ ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
+ ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
+ if (commentSize == 0) break;
+ }
+ else {
+ ASMJIT_PROPAGATE(sb.appendString(comment, commentSize));
+ }
+
+ currentSize += sb.size() - begin;
+ align += kMaxBinarySize;
+ sep = '|';
+ }
+ }
+
+ return sb.appendChar('\n');
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/logging.h b/3rdparty/asmjit/src/asmjit/core/logging.h
new file mode 100644
index 00000000000..468e3a1ba2b
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/logging.h
@@ -0,0 +1,355 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_LOGGING_H_INCLUDED
+#define ASMJIT_CORE_LOGGING_H_INCLUDED
+
+#include "../core/inst.h"
+#include "../core/string.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+#ifndef ASMJIT_NO_LOGGING
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class BaseEmitter;
+class BaseReg;
+class Logger;
+struct Operand_;
+
+#ifndef ASMJIT_NO_BUILDER
+class BaseBuilder;
+class BaseNode;
+#endif
+
+// ============================================================================
+// [asmjit::FormatOptions]
+// ============================================================================
+
+class FormatOptions {
+public:
+ uint32_t _flags;
+ uint8_t _indentation[4];
+
+ enum Flags : uint32_t {
+ //! Show also binary form of each logged instruction (assembler).
+ kFlagMachineCode = 0x00000001u,
+ //! Show a text explanation of some immediate values.
+ kFlagExplainImms = 0x00000002u,
+ //! Use hexadecimal notation of immediate values.
+ kFlagHexImms = 0x00000004u,
+ //! Use hexadecimal notation of address offsets.
+ kFlagHexOffsets = 0x00000008u,
+ //! Show casts between virtual register types (compiler).
+ kFlagRegCasts = 0x00000010u,
+ //! Show positions associated with nodes (compiler).
+ kFlagPositions = 0x00000020u,
+ //! Annotate nodes that are lowered by passes.
+ kFlagAnnotations = 0x00000040u,
+
+ // TODO: These must go, keep this only for formatting.
+ //! Show an additional output from passes.
+ kFlagDebugPasses = 0x00000080u,
+ //! Show an additional output from RA.
+ kFlagDebugRA = 0x00000100u
+ };
+
+ enum IndentationType : uint32_t {
+ //! Indentation used for instructions and directives.
+ kIndentationCode = 0u,
+ //! Indentation used for labels and function nodes.
+ kIndentationLabel = 1u,
+ //! Indentation used for comments (not inline comments).
+ kIndentationComment = 2u,
+ kIndentationReserved = 3u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ constexpr FormatOptions() noexcept
+ : _flags(0),
+ _indentation { 0, 0, 0, 0 } {}
+
+ constexpr FormatOptions(const FormatOptions& other) noexcept = default;
+ inline FormatOptions& operator=(const FormatOptions& other) noexcept = default;
+
+ inline void reset() noexcept {
+ _flags = 0;
+ _indentation[0] = 0;
+ _indentation[1] = 0;
+ _indentation[2] = 0;
+ _indentation[3] = 0;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ constexpr uint32_t flags() const noexcept { return _flags; }
+ constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; }
+ inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); }
+ inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Logger]
+// ============================================================================
+
+//! Abstract logging interface and helpers.
+//!
+//! This class can be inherited and reimplemented to fit into your logging
+//! subsystem. When reimplementing, override the `Logger::_log()` method to
+//! log into a custom stream.
+//!
+//! There are two `Logger` implementations offered by AsmJit:
+//! - `FileLogger` - logs into a `FILE*`.
+//! - `StringLogger` - logs into a `String`.
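+//!
+//! A minimal sketch of attaching a logger (assumes a `CodeHolder code` that
+//! is already initialized):
+//!
+//! ```
+//! FileLogger logger(stdout);
+//! logger.addFlags(FormatOptions::kFlagMachineCode);
+//! code.setLogger(&logger);
+//! ```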
+class ASMJIT_VIRTAPI Logger {
+public:
+ ASMJIT_BASE_CLASS(Logger)
+ ASMJIT_NONCOPYABLE(Logger)
+
+ //! Format options.
+ FormatOptions _options;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a `Logger` instance.
+ ASMJIT_API Logger() noexcept;
+ //! Destroys the `Logger` instance.
+ ASMJIT_API virtual ~Logger() noexcept;
+
+ //! \}
+
+ //! \name Format Options
+ //! \{
+
+ inline FormatOptions& options() noexcept { return _options; }
+ inline const FormatOptions& options() const noexcept { return _options; }
+
+ inline uint32_t flags() const noexcept { return _options.flags(); }
+ inline bool hasFlag(uint32_t flag) const noexcept { return _options.hasFlag(flag); }
+ inline void setFlags(uint32_t flags) noexcept { _options.setFlags(flags); }
+ inline void addFlags(uint32_t flags) noexcept { _options.addFlags(flags); }
+ inline void clearFlags(uint32_t flags) noexcept { _options.clearFlags(flags); }
+
+ inline uint32_t indentation(uint32_t type) const noexcept { return _options.indentation(type); }
+ inline void setIndentation(uint32_t type, uint32_t n) noexcept { _options.setIndentation(type, n); }
+ inline void resetIndentation(uint32_t type) noexcept { _options.resetIndentation(type); }
+
+ //! \}
+
+ //! \name Logging Interface
+ //! \{
+
+ //! Logs `data` of the given `size` - must be reimplemented.
+ virtual Error _log(const char* data, size_t size) noexcept = 0;
+
+ //! Logs the string `data`, which is either null terminated (pass `SIZE_MAX`
+ //! as `size`) or has exactly `size` bytes.
+ inline Error log(const char* data, size_t size = SIZE_MAX) noexcept { return _log(data, size); }
+ //! Logs the content of the string `str`.
+ inline Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
+
+ //! Formats the message by using `snprintf()` and then sends the result
+ //! to `log()`.
+ ASMJIT_API Error logf(const char* fmt, ...) noexcept;
+
+ //! Formats the message by using `vsnprintf()` and then sends the result
+ //! to `log()`.
+ ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
+
+ //! Logs binary data.
+ ASMJIT_API Error logBinary(const void* data, size_t size) noexcept;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::FileLogger]
+// ============================================================================
+
+//! Logger that can log to a `FILE*`.
+class ASMJIT_VIRTAPI FileLogger : public Logger {
+public:
+ ASMJIT_NONCOPYABLE(FileLogger)
+
+ FILE* _file;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `FileLogger` that logs to `FILE*`.
+ ASMJIT_API FileLogger(FILE* file = nullptr) noexcept;
+ //! Destroys the `FileLogger`.
+ ASMJIT_API virtual ~FileLogger() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the logging output stream or null if the logger has no output
+ //! stream.
+ inline FILE* file() const noexcept { return _file; }
+
+ //! Sets the logging output stream to `file` or null.
+ //!
+ //! \note If the `file` is null the logging will be disabled. When a logger
+ //! is attached to `CodeHolder` or any emitter the logging API will always
+ //! be called regardless of the output file. This means that if you really
+ //! want to disable logging at emitter level you must not attach a logger
+ //! to it.
+ inline void setFile(FILE* file) noexcept { _file = file; }
+
+ //! \}
+
+ ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
+};
+
+// ============================================================================
+// [asmjit::StringLogger]
+// ============================================================================
+
+//! Logger that stores everything in an internal string buffer.
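+//!
+//! A short sketch (assumes a `CodeHolder code`; the accumulated text can be
+//! read back via `data()`):
+//!
+//! ```
+//! StringLogger logger;
+//! code.setLogger(&logger);
+//!
+//! // ... emit some code ...
+//!
+//! printf("%s", logger.data());
+//! ```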
+class ASMJIT_VIRTAPI StringLogger : public Logger {
+public:
+ ASMJIT_NONCOPYABLE(StringLogger)
+
+ //! Logger data as string.
+ String _content;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `StringLogger`.
+ ASMJIT_API StringLogger() noexcept;
+ //! Destroys the `StringLogger`.
+ ASMJIT_API virtual ~StringLogger() noexcept;
+
+ //! \}
+
+ //! \name Logger Data Accessors
+ //! \{
+
+ //! Returns aggregated logger data as `char*` pointer.
+ //!
+ //! The pointer is owned by `StringLogger` and must not be modified or freed.
+ inline const char* data() const noexcept { return _content.data(); }
+ //! Returns size of the data returned by `data()`.
+ inline size_t dataSize() const noexcept { return _content.size(); }
+
+ //! \}
+
+ //! \name Logger Data Manipulation
+ //! \{
+
+ //! Clears the accumulated logger data.
+ inline void clear() noexcept { _content.clear(); }
+
+ //! \}
+
+ ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
+};
+
+// ============================================================================
+// [asmjit::Logging]
+// ============================================================================
+
+struct Logging {
+ ASMJIT_API static Error formatRegister(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ uint32_t regType,
+ uint32_t regId) noexcept;
+
+ ASMJIT_API static Error formatLabel(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t labelId) noexcept;
+
+ ASMJIT_API static Error formatOperand(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const Operand_& op) noexcept;
+
+ ASMJIT_API static Error formatInstruction(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept;
+
+ ASMJIT_API static Error formatTypeId(
+ String& sb,
+ uint32_t typeId) noexcept;
+
+#ifndef ASMJIT_NO_BUILDER
+ ASMJIT_API static Error formatNode(
+ String& sb,
+ uint32_t flags,
+ const BaseBuilder* cb,
+ const BaseNode* node_) noexcept;
+#endif
+
+ // Only used by AsmJit internals, not available to users.
+#ifdef ASMJIT_EXPORTS
+ enum {
+ // Has to be big enough to hold all metadata the compiler can assign to a
+ // single instruction.
+ kMaxInstLineSize = 44,
+ kMaxBinarySize = 26
+ };
+
+ static Error formatLine(
+ String& sb,
+ const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
+#endif
+};
+#endif
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_LOGGING_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/misc_p.h b/3rdparty/asmjit/src/asmjit/core/misc_p.h
new file mode 100644
index 00000000000..916ca9db67c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/misc_p.h
@@ -0,0 +1,49 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
+#define ASMJIT_CORE_MISC_P_H_INCLUDED
+
+#include "../core/api-config.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_support
+//! \{
+
+#define ASMJIT_LOOKUP_TABLE_8(T, I) T((I)), T((I+1)), T((I+2)), T((I+3)), T((I+4)), T((I+5)), T((I+6)), T((I+7))
+#define ASMJIT_LOOKUP_TABLE_16(T, I) ASMJIT_LOOKUP_TABLE_8(T, I), ASMJIT_LOOKUP_TABLE_8(T, I + 8)
+#define ASMJIT_LOOKUP_TABLE_32(T, I) ASMJIT_LOOKUP_TABLE_16(T, I), ASMJIT_LOOKUP_TABLE_16(T, I + 16)
+#define ASMJIT_LOOKUP_TABLE_64(T, I) ASMJIT_LOOKUP_TABLE_32(T, I), ASMJIT_LOOKUP_TABLE_32(T, I + 32)
+#define ASMJIT_LOOKUP_TABLE_128(T, I) ASMJIT_LOOKUP_TABLE_64(T, I), ASMJIT_LOOKUP_TABLE_64(T, I + 64)
+#define ASMJIT_LOOKUP_TABLE_256(T, I) ASMJIT_LOOKUP_TABLE_128(T, I), ASMJIT_LOOKUP_TABLE_128(T, I + 128)
+#define ASMJIT_LOOKUP_TABLE_512(T, I) ASMJIT_LOOKUP_TABLE_256(T, I), ASMJIT_LOOKUP_TABLE_256(T, I + 256)
+#define ASMJIT_LOOKUP_TABLE_1024(T, I) ASMJIT_LOOKUP_TABLE_512(T, I), ASMJIT_LOOKUP_TABLE_512(T, I + 512)
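+
+// A minimal sketch of the intended use (hypothetical `SQUARE_OF` mapper):
+// `ASMJIT_LOOKUP_TABLE_16(T, 0)` expands to `T(0), T(1), ..., T(15)`, so a
+// 16-entry table can be built as:
+//
+// #define SQUARE_OF(I) uint32_t((I) * (I))
+// static const uint32_t squareTable[16] = { ASMJIT_LOOKUP_TABLE_16(SQUARE_OF, 0) };
+// #undef SQUARE_OF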
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_MISC_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/operand.cpp b/3rdparty/asmjit/src/asmjit/core/operand.cpp
new file mode 100644
index 00000000000..9d11f3f6ddc
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/operand.cpp
@@ -0,0 +1,136 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Operand - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(operand) {
+ INFO("Checking operand sizes");
+ EXPECT(sizeof(Operand) == 16);
+ EXPECT(sizeof(BaseReg) == 16);
+ EXPECT(sizeof(BaseMem) == 16);
+ EXPECT(sizeof(Imm) == 16);
+ EXPECT(sizeof(Label) == 16);
+
+ INFO("Checking basic functionality of Operand");
+ Operand a, b;
+ Operand dummy;
+
+ EXPECT(a.isNone() == true);
+ EXPECT(a.isReg() == false);
+ EXPECT(a.isMem() == false);
+ EXPECT(a.isImm() == false);
+ EXPECT(a.isLabel() == false);
+ EXPECT(a == b);
+ EXPECT(a._data[0] == 0);
+ EXPECT(a._data[1] == 0);
+
+ INFO("Checking basic functionality of Label");
+ Label label;
+ EXPECT(label.isValid() == false);
+ EXPECT(label.id() == Globals::kInvalidId);
+
+ INFO("Checking basic functionality of BaseReg");
+ EXPECT(BaseReg().isReg() == true);
+ EXPECT(BaseReg().isValid() == false);
+ EXPECT(BaseReg()._data[0] == 0);
+ EXPECT(BaseReg()._data[1] == 0);
+ EXPECT(dummy.as<BaseReg>().isValid() == false);
+
+ // Create some register (not specific to any architecture).
+ uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift ) |
+ (2 << Operand::kSignatureRegGroupShift) |
+ (8 << Operand::kSignatureSizeShift ) ;
+ BaseReg r1(rSig, 5);
+
+ EXPECT(r1.isValid() == true);
+ EXPECT(r1.isReg() == true);
+ EXPECT(r1.isReg(1) == true);
+ EXPECT(r1.isPhysReg() == true);
+ EXPECT(r1.isVirtReg() == false);
+ EXPECT(r1.signature() == rSig);
+ EXPECT(r1.type() == 1);
+ EXPECT(r1.group() == 2);
+ EXPECT(r1.size() == 8);
+ EXPECT(r1.id() == 5);
+ EXPECT(r1.isReg(1, 5) == true); // RegType and Id.
+ EXPECT(r1._data[0] == 0);
+ EXPECT(r1._data[1] == 0);
+
+ // The same type of register having different id.
+ BaseReg r2(r1, 6);
+ EXPECT(r2.isValid() == true);
+ EXPECT(r2.isReg() == true);
+ EXPECT(r2.isReg(1) == true);
+ EXPECT(r2.isPhysReg() == true);
+ EXPECT(r2.isVirtReg() == false);
+ EXPECT(r2.signature() == rSig);
+ EXPECT(r2.type() == r1.type());
+ EXPECT(r2.group() == r1.group());
+ EXPECT(r2.size() == r1.size());
+ EXPECT(r2.id() == 6);
+ EXPECT(r2.isReg(1, 6) == true);
+
+ r1.reset();
+ EXPECT(!r1.isReg());
+ EXPECT(!r1.isValid());
+
+ INFO("Checking basic functionality of BaseMem");
+ BaseMem m;
+ EXPECT(m.isMem());
+ EXPECT(m == BaseMem());
+ EXPECT(m.hasBase() == false);
+ EXPECT(m.hasIndex() == false);
+ EXPECT(m.hasOffset() == false);
+ EXPECT(m.isOffset64Bit() == true);
+ EXPECT(m.offset() == 0);
+
+ m.setOffset(-1);
+ EXPECT(m.offsetLo32() == -1);
+ EXPECT(m.offset() == -1);
+
+ int64_t x = int64_t(0xFF00FF0000000001u);
+ int32_t xHi = int32_t(0xFF00FF00u);
+
+ m.setOffset(x);
+ EXPECT(m.offset() == x);
+ EXPECT(m.offsetLo32() == 1);
+ EXPECT(m.offsetHi32() == xHi);
+
+ INFO("Checking basic functionality of Imm");
+ Imm immValue(-42);
+ EXPECT(Imm(-1).i64() == int64_t(-1));
+ EXPECT(imm(-1).i64() == int64_t(-1));
+ EXPECT(immValue.i64() == int64_t(-42));
+ EXPECT(imm(0xFFFFFFFF).i64() == int64_t(0xFFFFFFFF));
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/operand.h b/3rdparty/asmjit/src/asmjit/core/operand.h
new file mode 100644
index 00000000000..eee1af4244e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/operand.h
@@ -0,0 +1,1337 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_OPERAND_H_INCLUDED
+#define ASMJIT_CORE_OPERAND_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [Macros]
+// ============================================================================
+
+//! Adds a template specialization for `REG_TYPE` into the local `RegTraits`.
+#define ASMJIT_DEFINE_REG_TRAITS(REG, REG_TYPE, GROUP, SIZE, COUNT, TYPE_ID) \
+template<> \
+struct RegTraits<REG_TYPE> { \
+ typedef REG RegT; \
+ \
+ static constexpr uint32_t kValid = 1; \
+ static constexpr uint32_t kCount = COUNT; \
+ static constexpr uint32_t kTypeId = TYPE_ID; \
+ \
+ static constexpr uint32_t kType = REG_TYPE; \
+ static constexpr uint32_t kGroup = GROUP; \
+ static constexpr uint32_t kSize = SIZE; \
+ \
+ static constexpr uint32_t kSignature = \
+ (Operand::kOpReg << Operand::kSignatureOpShift ) | \
+ (kType << Operand::kSignatureRegTypeShift ) | \
+ (kGroup << Operand::kSignatureRegGroupShift) | \
+ (kSize << Operand::kSignatureSizeShift ) ; \
+}
+
+//! Adds constructors and member functions to a class that implements an
+//! abstract register. An abstract register is a register that doesn't have a
+//! type or signature yet; it's a base class like `x86::Reg` or `arm::Reg`.
+#define ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \
+public: \
+ /*! Default constructor that only sets up basics. */ \
+ constexpr REG() noexcept \
+ : BASE(kSignature, kIdBad) {} \
+ \
+ /*! Makes a copy of the `other` register operand. */ \
+ constexpr REG(const REG& other) noexcept \
+ : BASE(other) {} \
+ \
+ /*! Makes a copy of the `other` register having its id set to `rId`. */ \
+ constexpr REG(const BaseReg& other, uint32_t rId) noexcept \
+ : BASE(other, rId) {} \
+ \
+ /*! Creates a register based on `signature` and `rId`. */ \
+ constexpr REG(uint32_t signature, uint32_t rId) noexcept \
+ : BASE(signature, rId) {} \
+ \
+ /*! Creates a completely uninitialized REG register operand (garbage). */ \
+ inline explicit REG(Globals::NoInit_) noexcept \
+ : BASE(Globals::NoInit) {} \
+ \
+ /*! Creates a new register from register type and id. */ \
+ static inline REG fromTypeAndId(uint32_t rType, uint32_t rId) noexcept { \
+ return REG(signatureOf(rType), rId); \
+ } \
+ \
+ /*! Clones the register operand. */ \
+ constexpr REG clone() const noexcept { return REG(*this); } \
+ \
+ inline REG& operator=(const REG& other) noexcept = default;
+
+//! Adds constructors and member functions to a class that implements final
+//! register. Final registers MUST HAVE a valid signature.
+#define ASMJIT_DEFINE_FINAL_REG(REG, BASE, TRAITS) \
+public: \
+ static constexpr uint32_t kThisType = TRAITS::kType; \
+ static constexpr uint32_t kThisGroup = TRAITS::kGroup; \
+ static constexpr uint32_t kThisSize = TRAITS::kSize; \
+ static constexpr uint32_t kSignature = TRAITS::kSignature; \
+ \
+ ASMJIT_DEFINE_ABSTRACT_REG(REG, BASE) \
+ \
+ /*! Creates a register operand having its id set to `rId`. */ \
+ constexpr explicit REG(uint32_t rId) noexcept \
+ : BASE(kSignature, rId) {}
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::Operand_]
+// ============================================================================
+
+//! Constructor-less `Operand`.
+//!
+//! Contains no initialization code, so it can safely be used to define data
+//! that is statically initialized or `static const`, or to define an array
+//! of operands that won't be default-initialized.
+//!
+//! The key difference between `Operand` and `Operand_`:
+//!
+//! ```
+//! Operand_ xArray[10]; // Not initialized, contains garbage.
+//! Operand yArray[10]; // All operands initialized to none.
+//! ```
+struct Operand_ {
+ //! Operand's signature that provides operand type and additional information.
+ uint32_t _signature;
+ //! Either a base id (used by memory operands) or any id (used by all other operand types).
+ uint32_t _baseId;
+
+ //! Data specific to the operand type.
+ //!
+ //! The reason we don't use a union is that we have `constexpr` constructors
+ //! that construct operands and other `constexpr` functions that return
+ //! either another Operand or something else. These cannot generally work
+ //! with unions, so we cannot use a `union` and stay standard compliant.
+ uint32_t _data[2];
+
+ //! Indexes to `_data` array.
+ enum DataIndex : uint32_t {
+ kDataMemIndexId = 0,
+ kDataMemOffsetLo = 1,
+
+ kDataImmValueLo = ASMJIT_ARCH_LE ? 0 : 1,
+ kDataImmValueHi = ASMJIT_ARCH_LE ? 1 : 0
+ };
+
+ //! Operand types that can be encoded in `Operand`.
+ enum OpType : uint32_t {
+ //! Not an operand or not initialized.
+ kOpNone = 0,
+ //! Operand is a register.
+ kOpReg = 1,
+ //! Operand is a memory.
+ kOpMem = 2,
+ //! Operand is an immediate value.
+ kOpImm = 3,
+ //! Operand is a label.
+ kOpLabel = 4
+ };
+ static_assert(kOpMem == kOpReg + 1, "asmjit::Operand requires `kOpMem` to be `kOpReg+1`.");
+
+ //! \cond INTERNAL
+ enum SignatureBits : uint32_t {
+ // Operand type (3 least significant bits).
+ // |........|........|........|.....XXX|
+ kSignatureOpShift = 0,
+ kSignatureOpMask = 0x07u << kSignatureOpShift,
+
+ // Register type (5 bits).
+ // |........|........|........|XXXXX...|
+ kSignatureRegTypeShift = 3,
+ kSignatureRegTypeMask = 0x1Fu << kSignatureRegTypeShift,
+
+ // Register group (4 bits).
+ // |........|........|....XXXX|........|
+ kSignatureRegGroupShift = 8,
+ kSignatureRegGroupMask = 0x0Fu << kSignatureRegGroupShift,
+
+ // Memory base type (5 bits).
+ // |........|........|........|XXXXX...|
+ kSignatureMemBaseTypeShift = 3,
+ kSignatureMemBaseTypeMask = 0x1Fu << kSignatureMemBaseTypeShift,
+
+ // Memory index type (5 bits).
+ // |........|........|...XXXXX|........|
+ kSignatureMemIndexTypeShift = 8,
+ kSignatureMemIndexTypeMask = 0x1Fu << kSignatureMemIndexTypeShift,
+
+ // Memory base+index combined (10 bits).
+ // |........|........|...XXXXX|XXXXX...|
+ kSignatureMemBaseIndexShift = 3,
+ kSignatureMemBaseIndexMask = 0x3FFu << kSignatureMemBaseIndexShift,
+
+ // Memory address type (2 bits).
+ // |........|........|.XX.....|........|
+ kSignatureMemAddrTypeShift = 13,
+ kSignatureMemAddrTypeMask = 0x03u << kSignatureMemAddrTypeShift,
+
+ // This memory operand represents a home-slot or stack (BaseCompiler).
+ // |........|........|X.......|........|
+ kSignatureMemRegHomeShift = 15,
+ kSignatureMemRegHomeFlag = 0x01u << kSignatureMemRegHomeShift,
+
+ // Operand size (8 most significant bits).
+ // |XXXXXXXX|........|........|........|
+ kSignatureSizeShift = 24,
+ kSignatureSizeMask = 0xFFu << kSignatureSizeShift
+ };
+ //! \endcond
+
+ //! \cond INTERNAL
+ //! Constants useful for VirtId <-> Index translation.
+ enum VirtIdConstants : uint32_t {
+ //! Minimum valid packed-id.
+ kVirtIdMin = 256,
+ //! Maximum valid packed-id, excludes Globals::kInvalidId.
+ kVirtIdMax = Globals::kInvalidId - 1,
+ //! Count of valid packed-ids.
+ kVirtIdCount = uint32_t(kVirtIdMax - kVirtIdMin + 1)
+ };
+
+ //! Tests whether the given `id` is a valid virtual register id. Since AsmJit
+ //! supports both physical and virtual registers it must be able to
+ //! distinguish between the two. The idea is that physical register ids are
+ //! always limited to a small range, so virtual identifiers start at
+ //! `kVirtIdMin` and end at `kVirtIdMax`.
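+ //!
+ //! \note The test relies on unsigned wrap-around: for any `id` below
+ //! `kVirtIdMin` the subtraction wraps to a large value that fails the
+ //! `< kVirtIdCount` comparison.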
+ static ASMJIT_INLINE bool isVirtId(uint32_t id) noexcept { return id - kVirtIdMin < uint32_t(kVirtIdCount); }
+ //! Converts a real-id into a packed-id that can be stored in Operand.
+ static ASMJIT_INLINE uint32_t indexToVirtId(uint32_t id) noexcept { return id + kVirtIdMin; }
+ //! Converts a packed-id back to real-id.
+ static ASMJIT_INLINE uint32_t virtIdToIndex(uint32_t id) noexcept { return id - kVirtIdMin; }
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! \cond INTERNAL
+ //! Initializes a `BaseReg` operand from `signature` and register `id`.
+ inline void _initReg(uint32_t signature, uint32_t id) noexcept {
+ _signature = signature;
+ _baseId = id;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! Initializes the operand from `other` (used by operator overloads).
+ inline void copyFrom(const Operand_& other) noexcept { memcpy(this, &other, sizeof(Operand_)); }
+ //! \endcond
+
+ //! Resets the `Operand` to none.
+ //!
+ //! A none operand is defined the following way:
+ //! - Its signature is zero (`kOpNone`, and the rest zero as well).
+ //! - Its base id is `0`.
+ //! - Both entries of `_data` are `0`.
+ //!
+ //! In other words, reset operands have all members set to zero. Reset operand
+ //! must match the Operand state right after its construction. Alternatively,
+ //! if you have an array of operands, you can simply use `memset()`.
+ //!
+ //! ```
+ //! using namespace asmjit;
+ //!
+ //! Operand a;
+ //! Operand b;
+ //! assert(a == b);
+ //!
+ //! b = x86::eax;
+ //! assert(a != b);
+ //!
+ //! b.reset();
+ //! assert(a == b);
+ //!
+ //! memset(&b, 0, sizeof(Operand));
+ //! assert(a == b);
+ //! ```
+ inline void reset() noexcept {
+ _signature = 0;
+ _baseId = 0;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! \}
+
+ //! \name Operator Overloads
+ //! \{
+
+ constexpr bool operator==(const Operand_& other) const noexcept { return isEqual(other); }
+ constexpr bool operator!=(const Operand_& other) const noexcept { return !isEqual(other); }
+
+ //! \}
+
+ //! \name Cast
+ //! \{
+
+ //! Casts this operand to `T` type.
+ template<typename T>
+ inline T& as() noexcept { return static_cast<T&>(*this); }
+
+ //! Casts this operand to `T` type (const).
+ template<typename T>
+ inline const T& as() const noexcept { return static_cast<const T&>(*this); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the operand matches the given `signature`.
+ constexpr bool hasSignature(uint32_t signature) const noexcept { return _signature == signature; }
+ //! Tests whether the operand matches the signature of the `other` operand.
+ constexpr bool hasSignature(const Operand_& other) const noexcept { return _signature == other.signature(); }
+
+ //! Returns operand signature as unsigned 32-bit integer.
+ //!
+ //! Signature is first 4 bytes of the operand data. It's used mostly for
+ //! operand checking as it's much faster to check 4 bytes at once than having
+ //! to check these bytes individually.
+ constexpr uint32_t signature() const noexcept { return _signature; }
+
+ //! Sets the operand signature, see `signature()`.
+ //!
+ //! \note Improper use of `setSignature()` can lead to hard-to-debug errors.
+ inline void setSignature(uint32_t signature) noexcept { _signature = signature; }
+
+ //! \cond INTERNAL
+ template<uint32_t mask>
+ constexpr bool _hasSignaturePart() const noexcept {
+ return (_signature & mask) != 0;
+ }
+
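+  // Extracts a bit field from the signature: shifting right by the mask's
+  // trailing-zero count moves the field to bit 0, and the shifted mask then
+  // isolates it.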
+ template<uint32_t mask>
+ constexpr uint32_t _getSignaturePart() const noexcept {
+ return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+ }
+
+ template<uint32_t mask>
+ inline void _setSignaturePart(uint32_t value) noexcept {
+ ASMJIT_ASSERT((value & ~(mask >> Support::constCtz(mask))) == 0);
+ _signature = (_signature & ~mask) | (value << Support::constCtz(mask));
+ }
+ //! \endcond
+
+ //! Returns the type of the operand, see `OpType`.
+ constexpr uint32_t opType() const noexcept { return _getSignaturePart<kSignatureOpMask>(); }
+ //! Tests whether the operand is none (`kOpNone`).
+ constexpr bool isNone() const noexcept { return _signature == 0; }
+ //! Tests whether the operand is a register (`kOpReg`).
+ constexpr bool isReg() const noexcept { return opType() == kOpReg; }
+ //! Tests whether the operand is a memory location (`kOpMem`).
+ constexpr bool isMem() const noexcept { return opType() == kOpMem; }
+ //! Tests whether the operand is an immediate (`kOpImm`).
+ constexpr bool isImm() const noexcept { return opType() == kOpImm; }
+ //! Tests whether the operand is a label (`kOpLabel`).
+ constexpr bool isLabel() const noexcept { return opType() == kOpLabel; }
+
+ //! Tests whether the operand is a physical register.
+ constexpr bool isPhysReg() const noexcept { return isReg() && _baseId < 0xFFu; }
+ //! Tests whether the operand is a virtual register.
+ constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; }
+
+ //! Tests whether the operand specifies a size (i.e. the size is not zero).
+ constexpr bool hasSize() const noexcept { return _hasSignaturePart<kSignatureSizeMask>(); }
+ //! Tests whether the size of the operand matches `size`.
+ constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; }
+
+ //! Returns the size of the operand in bytes.
+ //!
+ //! The value returned depends on the operand type:
+ //! * None - Should always return zero size.
+ //! * Reg - Should always return the size of the register. If the register
+ //! size depends on architecture (like `x86::CReg` and `x86::DReg`)
+ //! the size returned should be the greatest possible (so it should
+ //! return 64-bit size in such case).
+ //! * Mem - Size is optional and will be in most cases zero.
+ //! * Imm - Should always return zero size.
+ //! * Label - Should always return zero size.
+ constexpr uint32_t size() const noexcept { return _getSignaturePart<kSignatureSizeMask>(); }
+
+ //! Returns the operand id.
+ //!
+ //! The value returned should be interpreted accordingly to the operand type:
+ //! * None - Should be `0`.
+ //! * Reg - Physical or virtual register id.
+ //! * Mem - Multiple meanings - BASE address (register or label id), or
+ //! high value of a 64-bit absolute address.
+ //! * Imm - Should be `0`.
+ //! * Label - Label id if it was created by using `newLabel()` or
+ //! `Globals::kInvalidId` if the label is invalid or not
+ //! initialized.
+ constexpr uint32_t id() const noexcept { return _baseId; }
+
+ //! Tests whether the operand is 100% equal to `other`.
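+  //!
+  //! \note The comparison uses bitwise AND rather than logical AND,
+  //! presumably to keep the test branchless.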
+ constexpr bool isEqual(const Operand_& other) const noexcept {
+ return (_signature == other._signature) &
+ (_baseId == other._baseId ) &
+ (_data[0] == other._data[0] ) &
+ (_data[1] == other._data[1] ) ;
+ }
+
+ //! Tests whether the operand is a register matching `rType`.
+ constexpr bool isReg(uint32_t rType) const noexcept {
+ return (_signature & (kSignatureOpMask | kSignatureRegTypeMask)) ==
+ ((kOpReg << kSignatureOpShift) | (rType << kSignatureRegTypeShift));
+ }
+
+ //! Tests whether the operand is register and of `rType` and `rId`.
+ constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept {
+ return isReg(rType) && id() == rId;
+ }
+
+ //! Tests whether the operand is a register or memory.
+ constexpr bool isRegOrMem() const noexcept {
+ return Support::isBetween<uint32_t>(opType(), kOpReg, kOpMem);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Operand]
+// ============================================================================
+
+//! Operand can contain register, memory location, immediate, or label.
+class Operand : public Operand_ {
+public:
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates `kOpNone` operand having all members initialized to zero.
+ constexpr Operand() noexcept
+ : Operand_{ kOpNone, 0u, { 0u, 0u }} {}
+
+ //! Creates a cloned `other` operand.
+ constexpr Operand(const Operand& other) noexcept = default;
+
+ //! Creates a cloned `other` operand.
+  constexpr explicit Operand(const Operand_& other) noexcept
+    : Operand_(other) {}
+
+ //! Creates an operand initialized to raw `[u0, u1, u2, u3]` values.
+ constexpr Operand(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept
+ : Operand_{ u0, u1, { u2, u3 }} {}
+
+ //! Creates an uninitialized operand (dangerous).
+ inline explicit Operand(Globals::NoInit_) noexcept {}
+
+ //! \}
+
+ //! \name Operator Overloads
+ //! \{
+
+ inline Operand& operator=(const Operand& other) noexcept = default;
+ inline Operand& operator=(const Operand_& other) noexcept { return operator=(static_cast<const Operand&>(other)); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Clones this operand and returns its copy.
+ constexpr Operand clone() const noexcept { return Operand(*this); }
+
+ //! \}
+};
+
+static_assert(sizeof(Operand) == 16, "asmjit::Operand must be exactly 16 bytes long");
+
+namespace Globals {
+ //! A default-constructed operand of `Operand_::kOpNone` type.
+ static constexpr const Operand none;
+}
+
+// ============================================================================
+// [asmjit::Label]
+// ============================================================================
+
+//! Label (jump target or data location).
+//!
+//! Label represents a location in code typically used as a jump target, but
+//! may be also a reference to some data or a static variable. Label has to be
+//! explicitly created by BaseEmitter.
+//!
+//! Example of using labels:
+//!
+//! ```
+//! // Create some emitter (for example x86::Assembler).
+//! x86::Assembler a;
+//!
+//! // Create Label instance.
+//! Label L1 = a.newLabel();
+//!
+//! // ... your code ...
+//!
+//! // Using label.
+//! a.jmp(L1);
+//!
+//! // ... your code ...
+//!
+//! // Bind label to the current position, see `BaseEmitter::bind()`.
+//! a.bind(L1);
+//! ```
+class Label : public Operand {
+public:
+ //! Type of the Label.
+ enum LabelType : uint32_t {
+ //! Anonymous (unnamed) label.
+ kTypeAnonymous = 0,
+ //! Local label (always has parentId).
+ kTypeLocal = 1,
+ //! Global label (never has parentId).
+ kTypeGlobal = 2,
+ //! Number of label types.
+ kTypeCount = 3
+ };
+
+ // TODO: Find a better place, find a better name.
+ enum {
+ //! Label tag is used as a sub-type, forming a unique signature across all
+ //! operand types as 0x1 is never associated with any register (reg-type).
+ //! This means that a memory operand's BASE register can be constructed
+ //! from virtually any operand (register vs. label) by just assigning its
+ //! type (reg type or label-tag) and operand id.
+ kLabelTag = 0x1
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a label operand without ID (you must set the ID to make it valid).
+ constexpr Label() noexcept
+ : Operand(Globals::Init, kOpLabel, Globals::kInvalidId, 0, 0) {}
+
+  //! Creates a cloned label operand of `other`.
+ constexpr Label(const Label& other) noexcept
+ : Operand(other) {}
+
+ //! Creates a label operand of the given `id`.
+ constexpr explicit Label(uint32_t id) noexcept
+ : Operand(Globals::Init, kOpLabel, id, 0, 0) {}
+
+ inline explicit Label(Globals::NoInit_) noexcept
+ : Operand(Globals::NoInit) {}
+
+ //! Resets the label, will reset all properties and set its ID to `Globals::kInvalidId`.
+ inline void reset() noexcept {
+ _signature = kOpLabel;
+ _baseId = Globals::kInvalidId;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Label& operator=(const Label& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the label was created by CodeHolder and/or an attached emitter.
+ constexpr bool isValid() const noexcept { return _baseId != Globals::kInvalidId; }
+ //! Sets the label `id`.
+ inline void setId(uint32_t id) noexcept { _baseId = id; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseRegTraits]
+// ============================================================================
+
+//! \cond INTERNAL
+//! Default register traits.
+struct BaseRegTraits {
+ //! RegType is not valid by default.
+ static constexpr uint32_t kValid = 0;
+ //! Count of registers (0 if none).
+ static constexpr uint32_t kCount = 0;
+ //! Everything is void by default.
+ static constexpr uint32_t kTypeId = 0;
+
+ //! Zero type by default.
+ static constexpr uint32_t kType = 0;
+ //! Zero group by default.
+ static constexpr uint32_t kGroup = 0;
+ //! No size by default.
+ static constexpr uint32_t kSize = 0;
+
+ //! Empty signature by default.
+ static constexpr uint32_t kSignature = Operand::kOpReg;
+};
+//! \endcond
+
+// ============================================================================
+// [asmjit::BaseReg]
+// ============================================================================
+
+//! Structure that allows extracting register information based on the signature.
+//!
+//! This information is compatible with the operand's signature (32-bit integer)
+//! and `RegInfo` just provides an easy way to access it.
+struct RegInfo {
+ inline void reset() noexcept { _signature = 0; }
+ inline void setSignature(uint32_t signature) noexcept { _signature = signature; }
+
+ template<uint32_t mask>
+ constexpr uint32_t _getSignaturePart() const noexcept {
+ return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+ }
+
+ constexpr bool isValid() const noexcept { return _signature != 0; }
+ constexpr uint32_t signature() const noexcept { return _signature; }
+ constexpr uint32_t opType() const noexcept { return _getSignaturePart<Operand::kSignatureOpMask>(); }
+ constexpr uint32_t group() const noexcept { return _getSignaturePart<Operand::kSignatureRegGroupMask>(); }
+ constexpr uint32_t type() const noexcept { return _getSignaturePart<Operand::kSignatureRegTypeMask>(); }
+ constexpr uint32_t size() const noexcept { return _getSignaturePart<Operand::kSignatureSizeMask>(); }
+
+ uint32_t _signature;
+};
+
+//! Physical/Virtual register operand.
+class BaseReg : public Operand {
+public:
+ //! Architecture neutral register types.
+ //!
+  //! These must be reused by any platform that contains these types. All GP
+ //! and VEC registers are also allowed by design to be part of a BASE|INDEX
+ //! of a memory operand.
+ enum RegType : uint32_t {
+ //! No register - unused, invalid, multiple meanings.
+ kTypeNone = 0,
+
+ // (1 is used as a LabelTag)
+
+ //! 8-bit low general purpose register (X86).
+ kTypeGp8Lo = 2,
+ //! 8-bit high general purpose register (X86).
+ kTypeGp8Hi = 3,
+ //! 16-bit general purpose register (X86).
+ kTypeGp16 = 4,
+ //! 32-bit general purpose register (X86|ARM).
+ kTypeGp32 = 5,
+ //! 64-bit general purpose register (X86|ARM).
+ kTypeGp64 = 6,
+ //! 32-bit view of a vector register (ARM).
+ kTypeVec32 = 7,
+ //! 64-bit view of a vector register (ARM).
+ kTypeVec64 = 8,
+ //! 128-bit view of a vector register (X86|ARM).
+ kTypeVec128 = 9,
+ //! 256-bit view of a vector register (X86).
+ kTypeVec256 = 10,
+ //! 512-bit view of a vector register (X86).
+ kTypeVec512 = 11,
+ //! 1024-bit view of a vector register (future).
+ kTypeVec1024 = 12,
+ //! Other0 register, should match `kOther0` group.
+ kTypeOther0 = 13,
+ //! Other1 register, should match `kOther1` group.
+ kTypeOther1 = 14,
+    //! Universal type of the IP/PC register (if separate).
+ kTypeIP = 15,
+ //! Start of platform dependent register types (must be honored).
+ kTypeCustom = 16,
+    //! Maximum possible register type of all architectures.
+ kTypeMax = 31
+ };
+
+ //! Register group (architecture neutral), and some limits.
+ enum RegGroup : uint32_t {
+ //! General purpose register group compatible with all backends.
+ kGroupGp = 0,
+ //! Vector register group compatible with all backends.
+ kGroupVec = 1,
+ //! Group that is architecture dependent.
+ kGroupOther0 = 2,
+ //! Group that is architecture dependent.
+ kGroupOther1 = 3,
+ //! Count of register groups used by virtual registers.
+ kGroupVirt = 4,
+ //! Count of register groups used by physical registers.
+ kGroupCount = 16
+ };
+
+ enum Id : uint32_t {
+ //! None or any register (mostly internal).
+ kIdBad = 0xFFu
+ };
+
+ static constexpr uint32_t kSignature = kOpReg;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a dummy register operand.
+ constexpr BaseReg() noexcept
+ : Operand(Globals::Init, kSignature, kIdBad, 0, 0) {}
+
+  //! Creates a new register operand which is the same as `other`.
+ constexpr BaseReg(const BaseReg& other) noexcept
+ : Operand(other) {}
+
+ //! Creates a new register operand compatible with `other`, but with a different `rId`.
+ constexpr BaseReg(const BaseReg& other, uint32_t rId) noexcept
+ : Operand(Globals::Init, other._signature, rId, 0, 0) {}
+
+ //! Creates a register initialized to `signature` and `rId`.
+ constexpr BaseReg(uint32_t signature, uint32_t rId) noexcept
+ : Operand(Globals::Init, signature, rId, 0, 0) {}
+
+ inline explicit BaseReg(Globals::NoInit_) noexcept
+ : Operand(Globals::NoInit) {}
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline BaseReg& operator=(const BaseReg& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether this register is the same as `other`.
+ //!
+ //! This is just an optimization. Registers by default only use the first
+ //! 8 bytes of the Operand, so this method takes advantage of this knowledge
+ //! and only compares these 8 bytes. If both operands were created correctly
+  //! then `isEqual()` and `isSame()` should give the same answer; however, if
+  //! one of the two operands contains garbage or other metadata in the
+  //! upper 8 bytes then `isSame()` may return `true` in cases where `isEqual()`
+  //! returns `false`.
+ constexpr bool isSame(const BaseReg& other) const noexcept {
+ return (_signature == other._signature) &
+ (_baseId == other._baseId ) ;
+ }
+
+ //! Tests whether the register is valid (either virtual or physical).
+ constexpr bool isValid() const noexcept { return (_signature != 0) & (_baseId != kIdBad); }
+
+ //! Tests whether this is a physical register.
+ constexpr bool isPhysReg() const noexcept { return _baseId < kIdBad; }
+ //! Tests whether this is a virtual register.
+ constexpr bool isVirtReg() const noexcept { return _baseId > kIdBad; }
+
+ //! Tests whether the register type matches `type` - same as `isReg(type)`, provided for convenience.
+ constexpr bool isType(uint32_t type) const noexcept { return (_signature & kSignatureRegTypeMask) == (type << kSignatureRegTypeShift); }
+ //! Tests whether the register group matches `group`.
+ constexpr bool isGroup(uint32_t group) const noexcept { return (_signature & kSignatureRegGroupMask) == (group << kSignatureRegGroupShift); }
+
+ //! Tests whether the register is a general purpose register (any size).
+ constexpr bool isGp() const noexcept { return isGroup(kGroupGp); }
+ //! Tests whether the register is a vector register.
+ constexpr bool isVec() const noexcept { return isGroup(kGroupVec); }
+
+ using Operand_::isReg;
+
+ //! Same as `isType()`, provided for convenience.
+ constexpr bool isReg(uint32_t rType) const noexcept { return isType(rType); }
+ //! Tests whether the register type matches `type` and register id matches `rId`.
+ constexpr bool isReg(uint32_t rType, uint32_t rId) const noexcept { return isType(rType) && id() == rId; }
+
+ //! Returns the type of the register.
+ constexpr uint32_t type() const noexcept { return _getSignaturePart<kSignatureRegTypeMask>(); }
+ //! Returns the register group.
+ constexpr uint32_t group() const noexcept { return _getSignaturePart<kSignatureRegGroupMask>(); }
+
+ //! Clones the register operand.
+ constexpr BaseReg clone() const noexcept { return BaseReg(*this); }
+
+ //! Casts this register to `RegT` by also changing its signature.
+ //!
+ //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+ template<typename RegT>
+ constexpr RegT cloneAs() const noexcept { return RegT(RegT::kSignature, id()); }
+
+ //! Casts this register to `other` by also changing its signature.
+ //!
+ //! \note Improper use of `cloneAs()` can lead to hard-to-debug errors.
+ template<typename RegT>
+ constexpr RegT cloneAs(const RegT& other) const noexcept { return RegT(other.signature(), id()); }
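+
+  // A hedged usage sketch of cloneAs() on X86 (`x86::Gpq`, `x86::Gpd`, and
+  // `x86::rax` are only illustrative):
+  //
+  //   x86::Gpq r64 = x86::rax;                // 64-bit GP register (id 0).
+  //   x86::Gpd r32 = r64.cloneAs<x86::Gpd>(); // Same id, 32-bit signature (eax).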
+
+ //! Sets the register id to `rId`.
+ inline void setId(uint32_t rId) noexcept { _baseId = rId; }
+
+ //! Sets a 32-bit operand signature based on traits of `RegT`.
+ template<typename RegT>
+ inline void setSignatureT() noexcept { _signature = RegT::kSignature; }
+
+ //! Sets the register `signature` and `rId`.
+ inline void setSignatureAndId(uint32_t signature, uint32_t rId) noexcept {
+ _signature = signature;
+ _baseId = rId;
+ }
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+  //! Tests whether the `op` operand is a general purpose register.
+  static inline bool isGp(const Operand_& op) noexcept {
+ // Check operand type and register group. Not interested in register type and size.
+ const uint32_t kSgn = (kOpReg << kSignatureOpShift ) |
+ (kGroupGp << kSignatureRegGroupShift) ;
+ return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+ }
+
+  //! Tests whether the `op` operand is a vector register.
+ static inline bool isVec(const Operand_& op) noexcept {
+ // Check operand type and register group. Not interested in register type and size.
+ const uint32_t kSgn = (kOpReg << kSignatureOpShift ) |
+ (kGroupVec << kSignatureRegGroupShift) ;
+ return (op.signature() & (kSignatureOpMask | kSignatureRegGroupMask)) == kSgn;
+ }
+
+ static inline bool isGp(const Operand_& op, uint32_t rId) noexcept { return isGp(op) & (op.id() == rId); }
+ static inline bool isVec(const Operand_& op, uint32_t rId) noexcept { return isVec(op) & (op.id() == rId); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RegOnly]
+// ============================================================================
+
+//! RegOnly is an 8-byte version of `BaseReg` that can store either a register
+//! or nothing.
+//!
+//! This class was designed to decrease the space consumed by each extra "operand"
+//! in `BaseEmitter` and `InstNode` classes.
+struct RegOnly {
+ //! Type of the operand, either `kOpNone` or `kOpReg`.
+ uint32_t _signature;
+ //! Physical or virtual register id.
+ uint32_t _id;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Initializes the `RegOnly` instance to hold register `signature` and `id`.
+ inline void init(uint32_t signature, uint32_t id) noexcept {
+ _signature = signature;
+ _id = id;
+ }
+
+ inline void init(const BaseReg& reg) noexcept { init(reg.signature(), reg.id()); }
+ inline void init(const RegOnly& reg) noexcept { init(reg.signature(), reg.id()); }
+
+ //! Resets the `RegOnly` members to zeros (none).
+ inline void reset() noexcept { init(0, 0); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+  //! Tests whether this `RegOnly` is none (same as calling `Operand_::isNone()`).
+ constexpr bool isNone() const noexcept { return _signature == 0; }
+ //! Tests whether the register is valid (either virtual or physical).
+ constexpr bool isReg() const noexcept { return _signature != 0; }
+
+ //! Tests whether this is a physical register.
+ constexpr bool isPhysReg() const noexcept { return _id < BaseReg::kIdBad; }
+ //! Tests whether this is a virtual register (used by `BaseCompiler`).
+ constexpr bool isVirtReg() const noexcept { return _id > BaseReg::kIdBad; }
+
+ //! Returns the register signature or 0 if no register is assigned.
+ constexpr uint32_t signature() const noexcept { return _signature; }
+ //! Returns the register id.
+ //!
+ //! \note Always check whether the register is assigned before using the
+ //! returned identifier as non-assigned `RegOnly` instance would return
+ //! zero id, which is still a valid register id.
+ constexpr uint32_t id() const noexcept { return _id; }
+
+ //! Sets the register id.
+ inline void setId(uint32_t id) noexcept { _id = id; }
+
+ //! \cond INTERNAL
+ //!
+ //! Extracts information from operand's signature.
+ template<uint32_t mask>
+ constexpr uint32_t _getSignaturePart() const noexcept {
+ return (_signature >> Support::constCtz(mask)) & (mask >> Support::constCtz(mask));
+ }
+ //! \endcond
+
+ //! Returns the type of the register.
+ constexpr uint32_t type() const noexcept { return _getSignaturePart<Operand::kSignatureRegTypeMask>(); }
+ //! Returns the register group.
+ constexpr uint32_t group() const noexcept { return _getSignaturePart<Operand::kSignatureRegGroupMask>(); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+  //! Converts this `RegOnly` to a real `RegT` operand.
+ template<typename RegT>
+ constexpr RegT toReg() const noexcept { return RegT(_signature, _id); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::BaseMem]
+// ============================================================================
+
+//! Base class for all memory operands.
+//!
+//! \note It's tricky to pack all possible cases that define a memory operand
+//! into just 16 bytes. The `BaseMem` splits data into the following parts:
+//!
+//! BASE - Base register or label - requires 36 bits total. 4 bits are used to
+//!        encode the type of the BASE operand (label vs. register type) and
+//!        the remaining 32 bits define the BASE id, which can be a physical or
+//!        virtual register index. If the BASE type is zero, which is never used
+//!        as a register type nor by labels, then the BASE field contains the
+//!        high DWORD of a possible 64-bit absolute address (possible on X64).
+//!
+//! INDEX - Index register (or theoretically a Label, which doesn't make sense).
+//!         Encoding is similar to BASE - it also requires 36 bits, split into
+//!         the INDEX type (4 bits defining the register type) and id (32 bits).
+//!
+//! OFFSET - A relative offset of the address. If BASE is specified the
+//!          relative displacement adjusts BASE and an optional INDEX. If
+//!          BASE is not specified then the OFFSET should be considered an
+//!          ABSOLUTE address (at least on X86). In that case its low 32 bits
+//!          are stored in the DISPLACEMENT field and the high 32 bits are
+//!          stored in BASE.
+//!
+//! OTHER - The remaining 8 bits can be used for any purpose. The x86::Mem
+//!         operand uses these bits to store the segment override prefix and
+//!         the index shift (scale).
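+//!
+//! For example, on X86 the parts map onto a `[base + index * scale + offset]`
+//! address (a hedged sketch; `x86::ptr` and the registers are only
+//! illustrative):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! // BASE=rax, INDEX=rbx with shift 2 (scale 4), OFFSET=16.
+//! x86::Mem m = x86::ptr(x86::rax, x86::rbx, 2, 16);
+//! ```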
+class BaseMem : public Operand {
+public:
+ enum AddrType : uint32_t {
+ kAddrTypeDefault = 0,
+ kAddrTypeAbs = 1,
+ kAddrTypeRel = 2
+ };
+
+ // Shortcuts.
+ enum SignatureMem : uint32_t {
+ kSignatureMemAbs = kAddrTypeAbs << kSignatureMemAddrTypeShift,
+ kSignatureMemRel = kAddrTypeRel << kSignatureMemAddrTypeShift
+ };
+
+ //! \cond INTERNAL
+ //! Used internally to construct `BaseMem` operand from decomposed data.
+ struct Decomposed {
+ uint32_t baseType;
+ uint32_t baseId;
+ uint32_t indexType;
+ uint32_t indexId;
+ int32_t offset;
+ uint32_t size;
+ uint32_t flags;
+ };
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+  //! Creates a default `BaseMem` operand that points to [0].
+ constexpr BaseMem() noexcept
+ : Operand(Globals::Init, kOpMem, 0, 0, 0) {}
+
+ //! Creates a `BaseMem` operand that is a clone of `other`.
+ constexpr BaseMem(const BaseMem& other) noexcept
+ : Operand(other) {}
+
+ //! \cond INTERNAL
+
+ //! Creates a `BaseMem` operand from 4 integers as used by `Operand_` struct.
+ constexpr BaseMem(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept
+ : Operand(Globals::Init, u0, u1, u2, u3) {}
+
+ constexpr BaseMem(const Decomposed& d) noexcept
+ : Operand(Globals::Init,
+ kOpMem | (d.baseType << kSignatureMemBaseTypeShift )
+ | (d.indexType << kSignatureMemIndexTypeShift)
+ | (d.size << kSignatureSizeShift )
+ | d.flags,
+ d.baseId,
+ d.indexId,
+ uint32_t(d.offset)) {}
+
+ //! \endcond
+
+ //! Creates a completely uninitialized `BaseMem` operand.
+ inline explicit BaseMem(Globals::NoInit_) noexcept
+ : Operand(Globals::NoInit) {}
+
+ //! Resets the memory operand - after the reset the memory points to [0].
+ inline void reset() noexcept {
+ _signature = kOpMem;
+ _baseId = 0;
+ _data[0] = 0;
+ _data[1] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline BaseMem& operator=(const BaseMem& other) noexcept { copyFrom(other); return *this; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Clones the memory operand.
+ constexpr BaseMem clone() const noexcept { return BaseMem(*this); }
+
+ constexpr uint32_t addrType() const noexcept { return _getSignaturePart<kSignatureMemAddrTypeMask>(); }
+ inline void setAddrType(uint32_t addrType) noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(addrType); }
+ inline void resetAddrType() noexcept { _setSignaturePart<kSignatureMemAddrTypeMask>(0); }
+
+ constexpr bool isAbs() const noexcept { return addrType() == kAddrTypeAbs; }
+ inline void setAbs() noexcept { setAddrType(kAddrTypeAbs); }
+
+ constexpr bool isRel() const noexcept { return addrType() == kAddrTypeRel; }
+ inline void setRel() noexcept { setAddrType(kAddrTypeRel); }
+
+ constexpr bool isRegHome() const noexcept { return _hasSignaturePart<kSignatureMemRegHomeFlag>(); }
+ inline void setRegHome() noexcept { _signature |= kSignatureMemRegHomeFlag; }
+ inline void clearRegHome() noexcept { _signature &= ~kSignatureMemRegHomeFlag; }
+
+ //! Tests whether the memory operand has a BASE register or label specified.
+ constexpr bool hasBase() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0; }
+ //! Tests whether the memory operand has an INDEX register specified.
+ constexpr bool hasIndex() const noexcept { return (_signature & kSignatureMemIndexTypeMask) != 0; }
+  //! Tests whether the memory operand has a BASE or an INDEX register.
+  constexpr bool hasBaseOrIndex() const noexcept { return (_signature & kSignatureMemBaseIndexMask) != 0; }
+  //! Tests whether the memory operand has both BASE and INDEX registers.
+  constexpr bool hasBaseAndIndex() const noexcept { return (_signature & kSignatureMemBaseTypeMask) != 0 && (_signature & kSignatureMemIndexTypeMask) != 0; }
+
+ //! Tests whether the BASE operand is a register (registers start after `kLabelTag`).
+ constexpr bool hasBaseReg() const noexcept { return (_signature & kSignatureMemBaseTypeMask) > (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+ //! Tests whether the BASE operand is a label.
+ constexpr bool hasBaseLabel() const noexcept { return (_signature & kSignatureMemBaseTypeMask) == (Label::kLabelTag << kSignatureMemBaseTypeShift); }
+ //! Tests whether the INDEX operand is a register (registers start after `kLabelTag`).
+ constexpr bool hasIndexReg() const noexcept { return (_signature & kSignatureMemIndexTypeMask) > (Label::kLabelTag << kSignatureMemIndexTypeShift); }
+
+ //! Returns the type of the BASE register (0 if this memory operand doesn't
+ //! use the BASE register).
+ //!
+  //! \note If the returned type is one (a value never associated with a
+  //! register type) the BASE is not a register but a label; one equals
+  //! `kLabelTag`. You should always check `hasBaseLabel()` before using
+  //! the `baseId()` result.
+ constexpr uint32_t baseType() const noexcept { return _getSignaturePart<kSignatureMemBaseTypeMask>(); }
+
+ //! Returns the type of an INDEX register (0 if this memory operand doesn't
+ //! use the INDEX register).
+ constexpr uint32_t indexType() const noexcept { return _getSignaturePart<kSignatureMemIndexTypeMask>(); }
+
+  //! Returns both BASE (4:0 bits) and INDEX (9:5 bits) types combined into a
+  //! single value.
+  //!
+  //! \note This is used internally for BASE+INDEX validation.
+  constexpr uint32_t baseAndIndexTypes() const noexcept { return _getSignaturePart<kSignatureMemBaseIndexMask>(); }
+
+  //! Returns the id of the BASE register or label (if the BASE was specified
+  //! as a label).
+  constexpr uint32_t baseId() const noexcept { return _baseId; }
+
+ //! Returns the id of the INDEX register.
+ constexpr uint32_t indexId() const noexcept { return _data[kDataMemIndexId]; }
+
+ //! Sets the id of the BASE register (without modifying its type).
+ inline void setBaseId(uint32_t rId) noexcept { _baseId = rId; }
+ //! Sets the id of the INDEX register (without modifying its type).
+ inline void setIndexId(uint32_t rId) noexcept { _data[kDataMemIndexId] = rId; }
+
+ //! Sets the base register to type and id of the given `base` operand.
+ inline void setBase(const BaseReg& base) noexcept { return _setBase(base.type(), base.id()); }
+ //! Sets the index register to type and id of the given `index` operand.
+ inline void setIndex(const BaseReg& index) noexcept { return _setIndex(index.type(), index.id()); }
+
+ inline void _setBase(uint32_t rType, uint32_t rId) noexcept {
+ _setSignaturePart<kSignatureMemBaseTypeMask>(rType);
+ _baseId = rId;
+ }
+
+ inline void _setIndex(uint32_t rType, uint32_t rId) noexcept {
+ _setSignaturePart<kSignatureMemIndexTypeMask>(rType);
+ _data[kDataMemIndexId] = rId;
+ }
+
+ //! Resets the memory operand's BASE register or label.
+ inline void resetBase() noexcept { _setBase(0, 0); }
+ //! Resets the memory operand's INDEX register.
+ inline void resetIndex() noexcept { _setIndex(0, 0); }
+
+ //! Sets the memory operand size (in bytes).
+ inline void setSize(uint32_t size) noexcept { _setSignaturePart<kSignatureSizeMask>(size); }
+
+ //! Tests whether the memory operand has a 64-bit offset or absolute address.
+ //!
+ //! If this is true then `hasBase()` must always report false.
+ constexpr bool isOffset64Bit() const noexcept { return baseType() == 0; }
+
+ //! Tests whether the memory operand has a non-zero offset or absolute address.
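+  //!
+  //! \note The high DWORD (stored in `_baseId`) contributes to the test only
+  //! when the operand holds a 64-bit offset, i.e. when there is no BASE
+  //! register; otherwise `_baseId` holds a BASE register or label id.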
+ constexpr bool hasOffset() const noexcept {
+ return (_data[kDataMemOffsetLo] | uint32_t(_baseId & Support::bitMaskFromBool<uint32_t>(isOffset64Bit()))) != 0;
+ }
+
+ //! Returns either relative offset or absolute address as 64-bit integer.
+ constexpr int64_t offset() const noexcept {
+ return isOffset64Bit() ? int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32))
+ : int64_t(int32_t(_data[kDataMemOffsetLo])); // Sign extend 32-bit offset.
+ }
+
+ //! Returns a 32-bit low part of a 64-bit offset or absolute address.
+ constexpr int32_t offsetLo32() const noexcept { return int32_t(_data[kDataMemOffsetLo]); }
+  //! Returns a 32-bit high part of a 64-bit offset or absolute address.
+ //!
+ //! \note This function is UNSAFE and returns garbage if `isOffset64Bit()`
+ //! returns false. Never use it blindly without checking it first.
+ constexpr int32_t offsetHi32() const noexcept { return int32_t(_baseId); }
+
+ //! Sets a 64-bit offset or an absolute address to `offset`.
+ //!
+  //! \note This function attempts to set both high and low parts of a 64-bit
+ //! offset, however, if the operand has a BASE register it will store only the
+ //! low 32 bits of the offset / address as there is no way to store both BASE
+ //! and 64-bit offset, and there is currently no architecture that has such
+ //! capability targeted by AsmJit.
+ inline void setOffset(int64_t offset) noexcept {
+ uint32_t lo = uint32_t(uint64_t(offset) & 0xFFFFFFFFu);
+ uint32_t hi = uint32_t(uint64_t(offset) >> 32);
+ uint32_t hiMsk = Support::bitMaskFromBool<uint32_t>(isOffset64Bit());
+
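+    // hiMsk is all ones when the operand stores a 64-bit offset (no BASE
+    // register) and all zeros otherwise, so the high DWORD is merged into
+    // `_baseId` only when it's not occupied by a BASE register id.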
+ _data[kDataMemOffsetLo] = lo;
+ _baseId = (hi & hiMsk) | (_baseId & ~hiMsk);
+ }
+ //! Sets a low 32-bit offset to `offset` (don't use without knowing how BaseMem works).
+ inline void setOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] = uint32_t(offset); }
+
+ //! Adjusts the offset by a 64-bit `offset`.
+ inline void addOffset(int64_t offset) noexcept {
+ if (isOffset64Bit()) {
+ int64_t result = offset + int64_t(uint64_t(_data[kDataMemOffsetLo]) | (uint64_t(_baseId) << 32));
+ _data[kDataMemOffsetLo] = uint32_t(uint64_t(result) & 0xFFFFFFFFu);
+ _baseId = uint32_t(uint64_t(result) >> 32);
+ }
+ else {
+ _data[kDataMemOffsetLo] += uint32_t(uint64_t(offset) & 0xFFFFFFFFu);
+ }
+ }
+
+  //! Adds `offset` to the low 32-bit offset part (don't use without knowing
+  //! how BaseMem works).
+  //!
+  //! \note This is a fast function that doesn't use the HI 32 bits of a
+  //! 64-bit offset. Use it only if you know that there is a BASE register
+  //! and the offset is only 32 bits anyway.
+  inline void addOffsetLo32(int32_t offset) noexcept { _data[kDataMemOffsetLo] += uint32_t(offset); }
+
+ //! Resets the memory offset to zero.
+ inline void resetOffset() noexcept { setOffset(0); }
+
+ //! Resets the lo part of the memory offset to zero (don't use without knowing
+ //! how BaseMem works).
+ inline void resetOffsetLo32() noexcept { setOffsetLo32(0); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::Imm]
+// ============================================================================
+
+//! Immediate operand.
+//!
+//! An immediate operand is usually part of the instruction itself. It's
+//! encoded after or before the instruction opcode. Immediates can only be
+//! signed or unsigned integers.
+//!
+//! To create an immediate operand use `asmjit::imm()` helper, which can be used
+//! with any type, not just the default 64-bit int.
+class Imm : public Operand {
+public:
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new immediate value (initial value is 0).
+ constexpr Imm() noexcept
+ : Operand(Globals::Init, kOpImm, 0, 0, 0) {}
+
+ //! Creates a new immediate value from `other`.
+ constexpr Imm(const Imm& other) noexcept
+ : Operand(other) {}
+
+  //! Creates a new signed immediate value, assigning the value `val`.
+ constexpr explicit Imm(int64_t val) noexcept
+ : Operand(Globals::Init, kOpImm, 0, Support::unpackU32At0(val), Support::unpackU32At1(val)) {}
+
+ inline explicit Imm(Globals::NoInit_) noexcept
+ : Operand(Globals::NoInit) {}
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ //! Assigns the value of the `other` operand to this immediate.
+ inline Imm& operator=(const Imm& other) noexcept { copyFrom(other); return *this; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns immediate value as 8-bit signed integer, possibly cropped.
+ constexpr int8_t i8() const noexcept { return int8_t(_data[kDataImmValueLo] & 0xFFu); }
+ //! Returns immediate value as 8-bit unsigned integer, possibly cropped.
+ constexpr uint8_t u8() const noexcept { return uint8_t(_data[kDataImmValueLo] & 0xFFu); }
+ //! Returns immediate value as 16-bit signed integer, possibly cropped.
+ constexpr int16_t i16() const noexcept { return int16_t(_data[kDataImmValueLo] & 0xFFFFu);}
+ //! Returns immediate value as 16-bit unsigned integer, possibly cropped.
+ constexpr uint16_t u16() const noexcept { return uint16_t(_data[kDataImmValueLo] & 0xFFFFu);}
+ //! Returns immediate value as 32-bit signed integer, possibly cropped.
+ constexpr int32_t i32() const noexcept { return int32_t(_data[kDataImmValueLo]); }
+ //! Returns low 32-bit signed integer.
+ constexpr int32_t i32Lo() const noexcept { return int32_t(_data[kDataImmValueLo]); }
+ //! Returns high 32-bit signed integer.
+ constexpr int32_t i32Hi() const noexcept { return int32_t(_data[kDataImmValueHi]); }
+ //! Returns immediate value as 32-bit unsigned integer, possibly cropped.
+ constexpr uint32_t u32() const noexcept { return _data[kDataImmValueLo]; }
+  //! Returns low 32-bit unsigned integer.
+  constexpr uint32_t u32Lo() const noexcept { return _data[kDataImmValueLo]; }
+  //! Returns high 32-bit unsigned integer.
+  constexpr uint32_t u32Hi() const noexcept { return _data[kDataImmValueHi]; }
+ //! Returns immediate value as 64-bit signed integer.
+ constexpr int64_t i64() const noexcept { return int64_t((uint64_t(_data[kDataImmValueHi]) << 32) | _data[kDataImmValueLo]); }
+ //! Returns immediate value as 64-bit unsigned integer.
+ constexpr uint64_t u64() const noexcept { return uint64_t(i64()); }
+ //! Returns immediate value as `intptr_t`, possibly cropped if size of `intptr_t` is 32 bits.
+ constexpr intptr_t iptr() const noexcept { return (sizeof(intptr_t) == sizeof(int64_t)) ? intptr_t(i64()) : intptr_t(i32()); }
+ //! Returns immediate value as `uintptr_t`, possibly cropped if size of `uintptr_t` is 32 bits.
+ constexpr uintptr_t uptr() const noexcept { return (sizeof(uintptr_t) == sizeof(uint64_t)) ? uintptr_t(u64()) : uintptr_t(u32()); }
+
+  //! Tests whether the immediate can be cast to an 8-bit signed integer.
+  constexpr bool isInt8() const noexcept { return Support::isInt8(i64()); }
+  //! Tests whether the immediate can be cast to an 8-bit unsigned integer.
+  constexpr bool isUInt8() const noexcept { return Support::isUInt8(i64()); }
+  //! Tests whether the immediate can be cast to a 16-bit signed integer.
+  constexpr bool isInt16() const noexcept { return Support::isInt16(i64()); }
+  //! Tests whether the immediate can be cast to a 16-bit unsigned integer.
+  constexpr bool isUInt16() const noexcept { return Support::isUInt16(i64()); }
+  //! Tests whether the immediate can be cast to a 32-bit signed integer.
+  constexpr bool isInt32() const noexcept { return Support::isInt32(i64()); }
+  //! Tests whether the immediate can be cast to a 32-bit unsigned integer.
+ constexpr bool isUInt32() const noexcept { return _data[kDataImmValueHi] == 0; }
+
+ //! Sets immediate value to 8-bit signed integer `val`.
+ inline void setI8(int8_t val) noexcept { setI64(val); }
+ //! Sets immediate value to 8-bit unsigned integer `val`.
+ inline void setU8(uint8_t val) noexcept { setU64(val); }
+ //! Sets immediate value to 16-bit signed integer `val`.
+ inline void setI16(int16_t val) noexcept { setI64(val); }
+ //! Sets immediate value to 16-bit unsigned integer `val`.
+ inline void setU16(uint16_t val) noexcept { setU64(val); }
+ //! Sets immediate value to 32-bit signed integer `val`.
+ inline void setI32(int32_t val) noexcept { setI64(val); }
+ //! Sets immediate value to 32-bit unsigned integer `val`.
+ inline void setU32(uint32_t val) noexcept { setU64(val); }
+ //! Sets immediate value to 64-bit signed integer `val`.
+ inline void setI64(int64_t val) noexcept {
+ _data[kDataImmValueHi] = uint32_t(uint64_t(val) >> 32);
+ _data[kDataImmValueLo] = uint32_t(uint64_t(val) & 0xFFFFFFFFu);
+ }
+ //! Sets immediate value to 64-bit unsigned integer `val`.
+ inline void setU64(uint64_t val) noexcept { setI64(int64_t(val)); }
+ //! Sets immediate value to intptr_t `val`.
+ inline void setIPtr(intptr_t val) noexcept { setI64(val); }
+ //! Sets immediate value to uintptr_t `val`.
+ inline void setUPtr(uintptr_t val) noexcept { setU64(val); }
+
+ //! Sets immediate value to `val`.
+ template<typename T>
+ inline void setValue(T val) noexcept { setI64(int64_t(Support::asNormalized(val))); }
+
+ inline void setDouble(double d) noexcept { setU64(Support::bitCast<uint64_t>(d)); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Clones the immediate operand.
+ constexpr Imm clone() const noexcept { return Imm(*this); }
+
+ inline void signExtend8Bits() noexcept { setI64(int64_t(i8())); }
+ inline void signExtend16Bits() noexcept { setI64(int64_t(i16())); }
+ inline void signExtend32Bits() noexcept { setI64(int64_t(i32())); }
+
+ inline void zeroExtend8Bits() noexcept { setU64(u8()); }
+ inline void zeroExtend16Bits() noexcept { setU64(u16()); }
+ inline void zeroExtend32Bits() noexcept { _data[kDataImmValueHi] = 0u; }
+
+ //! \}
+};
+
+//! Creates a new immediate operand.
+//!
+//! Using `imm(x)` is much nicer than using `Imm(x)` as this is a template
+//! which can accept any integer including pointers and function pointers.
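+//!
+//! A minimal usage sketch (the values and `x` are only illustrative):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! int x = 0;
+//! Imm a = imm(-1);      // Signed integer, stored sign-extended to 64 bits.
+//! Imm b = imm(0xFFu);   // Unsigned integer, zero-extended.
+//! Imm c = imm(&x);      // Pointers (and function pointers) work as well.
+//! ```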
+template<typename T>
+static constexpr Imm imm(T val) noexcept {
+ return Imm(std::is_signed<T>::value ? int64_t(val) : int64_t(uint64_t(val)));
+}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_OPERAND_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/osutils.cpp b/3rdparty/asmjit/src/asmjit/core/osutils.cpp
new file mode 100644
index 00000000000..e2f34efb33e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/osutils.cpp
@@ -0,0 +1,106 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/osutils.h"
+#include "../core/support.h"
+
+#if defined(_WIN32)
+ #include <atomic>
+#elif defined(__APPLE__)
+ #include <mach/mach_time.h>
+#else
+ #include <time.h>
+ #include <unistd.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::OSUtils - GetTickCount]
+// ============================================================================
+
+uint32_t OSUtils::getTickCount() noexcept {
+#if defined(_WIN32)
+ enum HiResStatus : uint32_t {
+ kHiResUnknown = 0,
+ kHiResAvailable = 1,
+ kHiResNotAvailable = 2
+ };
+
+ static std::atomic<uint32_t> _hiResStatus(kHiResUnknown);
+ static volatile double _hiResFreq(0);
+
+ uint32_t status = _hiResStatus.load();
+ LARGE_INTEGER now, qpf;
+
+ if (status != kHiResNotAvailable && ::QueryPerformanceCounter(&now)) {
+ double freq = _hiResFreq;
+ if (status == kHiResUnknown) {
+ // Detects the availability of high resolution counter.
+ if (::QueryPerformanceFrequency(&qpf)) {
+ freq = double(qpf.QuadPart) / 1000.0;
+ _hiResFreq = freq;
+ _hiResStatus.compare_exchange_strong(status, kHiResAvailable);
+ status = kHiResAvailable;
+ }
+ else {
+ // High resolution not available.
+ _hiResStatus.compare_exchange_strong(status, kHiResNotAvailable);
+ }
+ }
+
+ if (status == kHiResAvailable)
+ return uint32_t(uint64_t(int64_t(double(now.QuadPart) / freq)) & 0xFFFFFFFFu);
+ }
+
+ // Bail to `GetTickCount()` if we cannot use high resolution.
+ return ::GetTickCount();
+#elif defined(__APPLE__)
+ // See Apple's QA1398.
+ static mach_timebase_info_data_t _machTime;
+
+ uint32_t denom = _machTime.denom;
+ if (ASMJIT_UNLIKELY(!denom)) {
+ if (mach_timebase_info(&_machTime) != KERN_SUCCESS || !(denom = _machTime.denom))
+ return 0;
+ }
+
+  // `mach_absolute_time()` returns ticks that the timebase converts to
+  // nanoseconds; dividing by 1000000 first yields milliseconds.
+ uint64_t t = mach_absolute_time() / 1000000u;
+ t = (t * _machTime.numer) / _machTime.denom;
+ return uint32_t(t & 0xFFFFFFFFu);
+#elif defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
+ struct timespec ts;
+ if (ASMJIT_UNLIKELY(clock_gettime(CLOCK_MONOTONIC, &ts) != 0))
+ return 0;
+
+ uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
+ return uint32_t(t & 0xFFFFFFFFu);
+#else
+ #pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.")
+ return 0;
+#endif
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/osutils.h b/3rdparty/asmjit/src/asmjit/core/osutils.h
new file mode 100644
index 00000000000..b9a2df4ad8f
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/osutils.h
@@ -0,0 +1,139 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
+#define ASMJIT_CORE_OSUTILS_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+// ============================================================================
+// [asmjit::OSUtils]
+// ============================================================================
+
+//! Operating system utilities.
+namespace OSUtils {
+ //! Gets the current CPU tick count, used for benchmarking (1ms resolution).
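+  //!
+  //! A minimal benchmarking sketch (the measured work is only illustrative;
+  //! the 32-bit counter wraps, but unsigned subtraction still yields the
+  //! elapsed time across a single wrap):
+  //!
+  //! ```
+  //! uint32_t start = OSUtils::getTickCount();
+  //! // ... work to measure ...
+  //! uint32_t elapsedMs = OSUtils::getTickCount() - start;
+  //! ```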
+ ASMJIT_API uint32_t getTickCount() noexcept;
+}
+
+// ============================================================================
+// [asmjit::Lock]
+// ============================================================================
+
+//! \cond INTERNAL
+
+//! Lock.
+//!
+//! Lock is internal; it cannot be used outside of AsmJit. However, its
+//! internal layout is exposed as it's used by some other public classes.
+class Lock {
+public:
+ ASMJIT_NONCOPYABLE(Lock)
+
+#if defined(_WIN32)
+#pragma pack(push, 8)
+ struct ASMJIT_MAY_ALIAS Handle {
+ void* DebugInfo;
+ long LockCount;
+ long RecursionCount;
+ void* OwningThread;
+ void* LockSemaphore;
+ unsigned long* SpinCount;
+ };
+ Handle _handle;
+#pragma pack(pop)
+#elif !defined(__EMSCRIPTEN__)
+ typedef pthread_mutex_t Handle;
+ Handle _handle;
+#endif
+
+ inline Lock() noexcept;
+ inline ~Lock() noexcept;
+
+ inline void lock() noexcept;
+ inline void unlock() noexcept;
+};
+
+#ifdef ASMJIT_EXPORTS
+#if defined(_WIN32)
+
+// Win32 implementation.
+static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
+static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");
+
+inline Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
+inline Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
+inline void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
+inline void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
+
+#elif !defined(__EMSCRIPTEN__)
+
+// PThread implementation.
+inline Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
+inline Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
+inline void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
+inline void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
+
+#else
+
+// Dummy implementation - Emscripten or other unsupported platform.
+inline Lock::Lock() noexcept {}
+inline Lock::~Lock() noexcept {}
+inline void Lock::lock() noexcept {}
+inline void Lock::unlock() noexcept {}
+
+#endif
+#endif
+
+//! \endcond
+
+// ============================================================================
+// [asmjit::LockGuard]
+// ============================================================================
+
+#ifdef ASMJIT_EXPORTS
+//! \cond INTERNAL
+
+//! Scoped lock.
+struct LockGuard {
+ ASMJIT_NONCOPYABLE(LockGuard)
+
+ Lock& _target;
+
+ inline LockGuard(Lock& target) noexcept : _target(target) { _target.lock(); }
+ inline ~LockGuard() noexcept { _target.unlock(); }
+};
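+
+// A minimal usage sketch (the guarded `Lock` instance is only illustrative):
+//
+//   static Lock lock;
+//   {
+//     LockGuard guard(lock);
+//     // ... critical section ...
+//   } // `lock` is released automatically here.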
+
+//! \endcond
+#endif
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_OSUTILS_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/raassignment_p.h b/3rdparty/asmjit/src/asmjit/core/raassignment_p.h
new file mode 100644
index 00000000000..2618afd0ece
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/raassignment_p.h
@@ -0,0 +1,399 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
+#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/radefs_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RAAssignment]
+// ============================================================================
+
+class RAAssignment {
+ ASMJIT_NONCOPYABLE(RAAssignment)
+
+public:
+ enum Ids : uint32_t {
+ kPhysNone = 0xFF,
+ kWorkNone = RAWorkReg::kIdNone
+ };
+
+ enum DirtyBit : uint32_t {
+ kClean = 0,
+ kDirty = 1
+ };
+
+ struct Layout {
+ inline void reset() noexcept {
+ physIndex.reset();
+ physCount.reset();
+ physTotal = 0;
+ workCount = 0;
+ workRegs = nullptr;
+ }
+
+ RARegIndex physIndex; //!< Index of architecture registers per group.
+ RARegCount physCount; //!< Count of architecture registers per group.
+ uint32_t physTotal; //!< Count of physical registers of all groups.
+ uint32_t workCount; //!< Count of work registers.
+ const RAWorkRegs* workRegs; //!< WorkRegs data (vector).
+ };
+
+ struct PhysToWorkMap {
+ static inline size_t sizeOf(uint32_t count) noexcept {
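+      // The struct declares a one-element `workIds` placeholder, so subtract
+      // its size before adding storage for `count` ids.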
+ return sizeof(PhysToWorkMap) - sizeof(uint32_t) + size_t(count) * sizeof(uint32_t);
+ }
+
+ inline void reset(uint32_t count) noexcept {
+ assigned.reset();
+ dirty.reset();
+
+ for (uint32_t i = 0; i < count; i++)
+ workIds[i] = kWorkNone;
+ }
+
+ inline void copyFrom(const PhysToWorkMap* other, uint32_t count) noexcept {
+ size_t size = sizeOf(count);
+ memcpy(this, other, size);
+ }
+
+ RARegMask assigned; //!< Assigned registers (each bit represents one physical reg).
+ RARegMask dirty; //!< Dirty registers (spill slot out of sync or no spill slot).
+ uint32_t workIds[1 /* ... */]; //!< PhysReg to WorkReg mapping.
+ };
+
+ struct WorkToPhysMap {
+ static inline size_t sizeOf(uint32_t count) noexcept {
+ return size_t(count) * sizeof(uint8_t);
+ }
+
+ inline void reset(uint32_t count) noexcept {
+ for (uint32_t i = 0; i < count; i++)
+ physIds[i] = kPhysNone;
+ }
+
+ inline void copyFrom(const WorkToPhysMap* other, uint32_t count) noexcept {
+ size_t size = sizeOf(count);
+ if (ASMJIT_LIKELY(size))
+ memcpy(this, other, size);
+ }
+
+    uint8_t physIds[1 /* ... */]; //!< WorkReg to PhysReg mapping.
+ };
+
+ //! Physical registers layout.
+ Layout _layout;
+ //! WorkReg to PhysReg mapping.
+ WorkToPhysMap* _workToPhysMap;
+ //! PhysReg to WorkReg mapping and assigned/dirty bits.
+ PhysToWorkMap* _physToWorkMap;
+ //! Optimization to translate PhysRegs to WorkRegs faster.
+ uint32_t* _physToWorkIds[BaseReg::kGroupVirt];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RAAssignment() noexcept {
+ _layout.reset();
+ resetMaps();
+ }
+
+ inline void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
+ // Layout must be initialized before data.
+ ASMJIT_ASSERT(_physToWorkMap == nullptr);
+ ASMJIT_ASSERT(_workToPhysMap == nullptr);
+
+ _layout.physIndex.buildIndexes(physCount);
+ _layout.physCount = physCount;
+ _layout.physTotal = uint32_t(_layout.physIndex[BaseReg::kGroupVirt - 1]) +
+ uint32_t(_layout.physCount[BaseReg::kGroupVirt - 1]) ;
+ _layout.workCount = workRegs.size();
+ _layout.workRegs = &workRegs;
+ }
+
+ inline void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
+ _physToWorkMap = physToWorkMap;
+ _workToPhysMap = workToPhysMap;
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ _physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
+ }
+
+ inline void resetMaps() noexcept {
+ _physToWorkMap = nullptr;
+ _workToPhysMap = nullptr;
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ _physToWorkIds[group] = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
+ inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
+
+ inline RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
+ inline const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
+ inline uint32_t assigned(uint32_t group) const noexcept { return _physToWorkMap->assigned[group]; }
+
+ inline RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
+ inline const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
+ inline uint32_t dirty(uint32_t group) const noexcept { return _physToWorkMap->dirty[group]; }
+
+ inline uint32_t workToPhysId(uint32_t group, uint32_t workId) const noexcept {
+ DebugUtils::unused(group);
+ ASMJIT_ASSERT(workId != kWorkNone);
+ ASMJIT_ASSERT(workId < _layout.workCount);
+ return _workToPhysMap->physIds[workId];
+ }
+
+ inline uint32_t physToWorkId(uint32_t group, uint32_t physId) const noexcept {
+ ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
+ return _physToWorkIds[group][physId];
+ }
+
+ inline bool isPhysAssigned(uint32_t group, uint32_t physId) const noexcept {
+ ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
+ return Support::bitTest(_physToWorkMap->assigned[group], physId);
+ }
+
+ inline bool isPhysDirty(uint32_t group, uint32_t physId) const noexcept {
+ ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
+ return Support::bitTest(_physToWorkMap->dirty[group], physId);
+ }
+
+ //! \}
+
+ //! \name Assignment
+ //! \{
+
+ // These are low-level allocation helpers that are used to update the current
+ // mappings between physical and virt/work registers and also to update masks
+ // that represent allocated and dirty registers. These functions don't emit
+ // any code; they are only used to update and keep all mappings in sync.
+
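+  // A minimal usage sketch (illustrative; `workId` and `physId` are
+  // hypothetical ids, and both maps are assumed to be allocated by the
+  // caller):
+  //
+  //   RAAssignment ra;
+  //   ra.initLayout(physCount, workRegs);        // Layout first...
+  //   ra.initMaps(physToWorkMap, workToPhysMap); // ...then the maps.
+  //   ra.assign(BaseReg::kGroupGp, workId, physId, /*dirty=*/false);
+  //   ASMJIT_ASSERT(ra.workToPhysId(BaseReg::kGroupGp, workId) == physId);
+  //   ra.unassign(BaseReg::kGroupGp, workId, physId);
+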
+ //! Assign [VirtReg/WorkReg] to a physical register.
+ ASMJIT_INLINE void assign(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
+ ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
+ ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
+ ASMJIT_ASSERT(!isPhysAssigned(group, physId));
+ ASMJIT_ASSERT(!isPhysDirty(group, physId));
+
+ _workToPhysMap->physIds[workId] = uint8_t(physId);
+ _physToWorkIds[group][physId] = workId;
+
+ uint32_t regMask = Support::bitMask(physId);
+ _physToWorkMap->assigned[group] |= regMask;
+ _physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
+
+ verify();
+ }
+
+ //! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
+ ASMJIT_INLINE void reassign(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
+ ASMJIT_ASSERT(dstPhysId != srcPhysId);
+ ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
+ ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
+ ASMJIT_ASSERT(isPhysAssigned(group, srcPhysId) == true);
+ ASMJIT_ASSERT(isPhysAssigned(group, dstPhysId) == false);
+
+ _workToPhysMap->physIds[workId] = uint8_t(dstPhysId);
+ _physToWorkIds[group][srcPhysId] = kWorkNone;
+ _physToWorkIds[group][dstPhysId] = workId;
+
+ uint32_t srcMask = Support::bitMask(srcPhysId);
+ uint32_t dstMask = Support::bitMask(dstPhysId);
+
+ uint32_t dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
+ uint32_t regMask = dstMask | srcMask;
+
+ _physToWorkMap->assigned[group] ^= regMask;
+ _physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
+
+ verify();
+ }
+
+ ASMJIT_INLINE void swap(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
+ ASMJIT_ASSERT(aPhysId != bPhysId);
+ ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
+ ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
+ ASMJIT_ASSERT(physToWorkId(group, aPhysId) == aWorkId);
+ ASMJIT_ASSERT(physToWorkId(group, bPhysId) == bWorkId);
+ ASMJIT_ASSERT(isPhysAssigned(group, aPhysId));
+ ASMJIT_ASSERT(isPhysAssigned(group, bPhysId));
+
+ _workToPhysMap->physIds[aWorkId] = uint8_t(bPhysId);
+ _workToPhysMap->physIds[bWorkId] = uint8_t(aPhysId);
+ _physToWorkIds[group][aPhysId] = bWorkId;
+ _physToWorkIds[group][bPhysId] = aWorkId;
+
+ uint32_t aMask = Support::bitMask(aPhysId);
+ uint32_t bMask = Support::bitMask(bPhysId);
+
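+    // If exactly one of the two registers is dirty, flip both dirty bits so
+    // that the dirty state follows the swapped contents; if both or neither
+    // are dirty there is nothing to update.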
+ uint32_t flipMask = Support::bitMaskFromBool<uint32_t>(
+ ((_physToWorkMap->dirty[group] & aMask) != 0) ^
+ ((_physToWorkMap->dirty[group] & bMask) != 0));
+
+ uint32_t regMask = aMask | bMask;
+ _physToWorkMap->dirty[group] ^= regMask & flipMask;
+
+ verify();
+ }
+
+ //! Unassign [VirtReg/WorkReg] from a physical register.
+ ASMJIT_INLINE void unassign(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
+ ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
+ ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
+ ASMJIT_ASSERT(isPhysAssigned(group, physId));
+
+ _workToPhysMap->physIds[workId] = kPhysNone;
+ _physToWorkIds[group][physId] = kWorkNone;
+
+ uint32_t regMask = Support::bitMask(physId);
+ _physToWorkMap->assigned[group] &= ~regMask;
+ _physToWorkMap->dirty[group] &= ~regMask;
+
+ verify();
+ }
+
+ inline void makeClean(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ DebugUtils::unused(workId);
+ uint32_t regMask = Support::bitMask(physId);
+ _physToWorkMap->dirty[group] &= ~regMask;
+ }
+
+ inline void makeDirty(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ DebugUtils::unused(workId);
+ uint32_t regMask = Support::bitMask(physId);
+ _physToWorkMap->dirty[group] |= regMask;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(RAAssignment& other) noexcept {
+ std::swap(_workToPhysMap, other._workToPhysMap);
+ std::swap(_physToWorkMap, other._physToWorkMap);
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ std::swap(_physToWorkIds[group], other._physToWorkIds[group]);
+ }
+
+ inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
+ memcpy(_physToWorkMap, physToWorkMap, PhysToWorkMap::sizeOf(_layout.physTotal));
+ memcpy(_workToPhysMap, workToPhysMap, WorkToPhysMap::sizeOf(_layout.workCount));
+ }
+
+ inline void copyFrom(const RAAssignment& other) noexcept {
+ copyFrom(other.physToWorkMap(), other.workToPhysMap());
+ }
+
+ // Not really useful outside of debugging.
+ bool equals(const RAAssignment& other) const noexcept {
+ // Layout should always match.
+ if (_layout.physIndex != other._layout.physIndex ||
+ _layout.physCount != other._layout.physCount ||
+ _layout.physTotal != other._layout.physTotal ||
+ _layout.workCount != other._layout.workCount ||
+ _layout.workRegs != other._layout.workRegs)
+ return false;
+
+ uint32_t physTotal = _layout.physTotal;
+ uint32_t workCount = _layout.workCount;
+
+ for (uint32_t physId = 0; physId < physTotal; physId++) {
+ uint32_t thisWorkId = _physToWorkMap->workIds[physId];
+ uint32_t otherWorkId = other._physToWorkMap->workIds[physId];
+ if (thisWorkId != otherWorkId)
+ return false;
+ }
+
+ for (uint32_t workId = 0; workId < workCount; workId++) {
+ uint32_t thisPhysId = _workToPhysMap->physIds[workId];
+ uint32_t otherPhysId = other._workToPhysMap->physIds[workId];
+ if (thisPhysId != otherPhysId)
+ return false;
+ }
+
+    if (_physToWorkMap->assigned != other._physToWorkMap->assigned ||
+        _physToWorkMap->dirty != other._physToWorkMap->dirty)
+ return false;
+
+ return true;
+ }
+
+#if defined(ASMJIT_BUILD_DEBUG)
+ ASMJIT_NOINLINE void verify() noexcept {
+ // Verify WorkToPhysMap.
+ {
+ for (uint32_t workId = 0; workId < _layout.workCount; workId++) {
+ uint32_t physId = _workToPhysMap->physIds[workId];
+ if (physId != kPhysNone) {
+ const RAWorkReg* workReg = _layout.workRegs->at(workId);
+ uint32_t group = workReg->group();
+ ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
+ }
+ }
+ }
+
+ // Verify PhysToWorkMap.
+ {
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ uint32_t physCount = _layout.physCount[group];
+ for (uint32_t physId = 0; physId < physCount; physId++) {
+ uint32_t workId = _physToWorkIds[group][physId];
+ if (workId != kWorkNone) {
+ ASMJIT_ASSERT(_workToPhysMap->physIds[workId] == physId);
+ }
+ }
+ }
+ }
+ }
+#else
+ inline void verify() noexcept {}
+#endif
+
+ //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/rabuilders_p.h b/3rdparty/asmjit/src/asmjit/core/rabuilders_p.h
new file mode 100644
index 00000000000..6f400ad1c3a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/rabuilders_p.h
@@ -0,0 +1,632 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
+#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/rapass_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RACFGBuilder]
+// ============================================================================
+
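+// RACFGBuilder uses CRTP: an architecture-specific builder derives from
+// `RACFGBuilder<Self>` and provides the `onInst()`, `onBeforeCall()`,
+// `onCall()`, `onBeforeRet()`, and `onRet()` callbacks invoked by `run()`.
+// A minimal sketch (illustrative; `MyCFGBuilder` is a hypothetical name):
+//
+//   class MyCFGBuilder : public RACFGBuilder<MyCFGBuilder> {
+//   public:
+//     MyCFGBuilder(RAPass* pass) noexcept : RACFGBuilder<MyCFGBuilder>(pass) {}
+//
+//     Error onInst(InstNode* inst, uint32_t& controlType, RAInstBuilder& ib) noexcept;
+//     Error onBeforeCall(FuncCallNode* call) noexcept;
+//     Error onCall(FuncCallNode* call, RAInstBuilder& ib) noexcept;
+//     Error onBeforeRet(FuncRetNode* ret) noexcept;
+//     Error onRet(FuncRetNode* ret, RAInstBuilder& ib) noexcept;
+//   };
+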
+template<typename This>
+class RACFGBuilder {
+public:
+ RAPass* _pass;
+ BaseCompiler* _cc;
+
+ RABlock* _curBlock;
+ RABlock* _retBlock;
+ FuncNode* _funcNode;
+ RARegsStats _blockRegStats;
+ uint32_t _exitLabelId;
+ ZoneVector<uint32_t> _sharedAssignmentsMap;
+
+  // Only used by logging; it's fine to have these here to prevent more #ifdefs...
+ bool _hasCode;
+ RABlock* _lastLoggedBlock;
+
+#ifndef ASMJIT_NO_LOGGING
+ Logger* _logger;
+ uint32_t _logFlags;
+ StringTmp<512> _sb;
+#endif
+
+ static constexpr uint32_t kRootIndentation = 2;
+ static constexpr uint32_t kCodeIndentation = 4;
+
+ // NOTE: This is a bit hacky. There are some nodes which are processed twice
+ // (see `onBeforeCall()` and `onBeforeRet()`) as they can insert some nodes
+  // around them. Since we don't have any flags to mark these, we just use
+  // their position, which is unassigned at that point.
+ static constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
+
+ inline RACFGBuilder(RAPass* pass) noexcept
+ : _pass(pass),
+ _cc(pass->cc()),
+ _curBlock(nullptr),
+ _retBlock(nullptr),
+ _funcNode(nullptr),
+ _blockRegStats{},
+ _exitLabelId(Globals::kInvalidId),
+ _hasCode(false),
+ _lastLoggedBlock(nullptr) {
+#ifndef ASMJIT_NO_LOGGING
+ _logger = _pass->debugLogger();
+ _logFlags = FormatOptions::kFlagPositions;
+
+ if (_logger)
+ _logFlags |= _logger->flags();
+#endif
+ }
+
+ inline BaseCompiler* cc() const noexcept { return _cc; }
+
+ // --------------------------------------------------------------------------
+ // [Run]
+ // --------------------------------------------------------------------------
+
+ //! Called per function by an architecture-specific CFG builder.
+ Error run() noexcept {
+ log("[RAPass::BuildCFG]\n");
+ ASMJIT_PROPAGATE(prepare());
+
+ logNode(_funcNode, kRootIndentation);
+ logBlock(_curBlock, kRootIndentation);
+
+ BaseNode* node = _funcNode->next();
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ _curBlock->setFirst(node);
+ _curBlock->setLast(node);
+
+ RAInstBuilder ib;
+ ZoneVector<RABlock*> blocksWithUnknownJumps;
+
+ for (;;) {
+ BaseNode* next = node->next();
+ ASMJIT_ASSERT(node->position() == 0 || node->position() == kNodePositionDidOnBefore);
+
+ if (node->isInst()) {
+ // Instruction | Jump | Invoke | Return
+ // ------------------------------------
+
+ // Handle `InstNode`, `FuncCallNode`, and `FuncRetNode`. All of them
+ // share the same interface that provides operands that have read/write
+ // semantics.
+ if (ASMJIT_UNLIKELY(!_curBlock)) {
+          // Unreachable code has to be removed because we cannot allocate
+          // registers for it - proper liveness analysis cannot be done on
+          // unreachable code.
+ removeNode(node);
+ node = next;
+ continue;
+ }
+
+ _hasCode = true;
+
+ if (node->isFuncCall() || node->isFuncRet()) {
+ if (node->position() != kNodePositionDidOnBefore) {
+            // Call and Ret are complicated as they may insert some surrounding
+            // code around them. The simplest approach is to get the previous
+            // node, call the `onBefore()` handlers, and then check whether
+            // anything changed; if so, restart by rewinding `node` to the
+            // first node inserted by `onBeforeCall()` or `onBeforeRet()`.
+ BaseNode* prev = node->prev();
+
+ if (node->type() == BaseNode::kNodeFuncCall)
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeCall(node->as<FuncCallNode>()));
+ else
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
+
+ if (prev != node->prev()) {
+ // If this was the first node in the block and something was
+ // inserted before it then we have to update the first block.
+ if (_curBlock->first() == node)
+ _curBlock->setFirst(prev->next());
+
+ node->setPosition(kNodePositionDidOnBefore);
+ node = prev->next();
+
+ // `onBeforeCall()` and `onBeforeRet()` can only insert instructions.
+ ASMJIT_ASSERT(node->isInst());
+ }
+
+ // Necessary if something was inserted after `node`, but nothing before.
+ next = node->next();
+ }
+ else {
+ // Change the position back to its original value.
+ node->setPosition(0);
+ }
+ }
+
+ InstNode* inst = node->as<InstNode>();
+ logNode(inst, kCodeIndentation);
+
+ uint32_t controlType = BaseInst::kControlNone;
+ ib.reset();
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, controlType, ib));
+
+ if (node->isFuncCall()) {
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onCall(inst->as<FuncCallNode>(), ib));
+ }
+
+ if (node->isFuncRet()) {
+ ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
+ controlType = BaseInst::kControlReturn;
+ }
+
+ if (controlType == BaseInst::kControlJump) {
+ uint32_t fixedRegCount = 0;
+ for (RATiedReg& tiedReg : ib) {
+ RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
+ if (workReg->group() == BaseReg::kGroupGp) {
+ uint32_t useId = tiedReg.useId();
+ if (useId == BaseReg::kIdBad) {
+ useId = _pass->_scratchRegIndexes[fixedRegCount++];
+ tiedReg.setUseId(useId);
+ }
+ _curBlock->addExitScratchGpRegs(Support::bitMask<uint32_t>(useId));
+ }
+ }
+ }
+
+ ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
+ _blockRegStats.combineWith(ib._stats);
+
+ if (controlType != BaseInst::kControlNone) {
+ // Support for conditional and unconditional jumps.
+ if (controlType == BaseInst::kControlJump || controlType == BaseInst::kControlBranch) {
+ _curBlock->setLast(node);
+ _curBlock->addFlags(RABlock::kFlagHasTerminator);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ if (!(inst->instOptions() & BaseInst::kOptionUnfollow)) {
+ // Jmp/Jcc/Call/Loop/etc...
+ uint32_t opCount = inst->opCount();
+ const Operand* opArray = inst->operands();
+
+ // Cannot jump anywhere without operands.
+ if (ASMJIT_UNLIKELY(!opCount))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (opArray[opCount - 1].isLabel()) {
+ // Labels are easy for constructing the control flow.
+ LabelNode* labelNode;
+ ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, opArray[opCount - 1].as<Label>()));
+
+ RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
+ if (ASMJIT_UNLIKELY(!targetBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
+ }
+ else {
+                // Not a label - this could be a jump with a reg/mem operand,
+                // which means it can go anywhere. Such jumps must be annotated
+                // so the CFG can be properly constructed; otherwise we assume
+                // the worst case - that it can jump to every basic block.
+ JumpAnnotation* jumpAnnotation = nullptr;
+ if (inst->type() == BaseNode::kNodeJump)
+ jumpAnnotation = inst->as<JumpNode>()->annotation();
+
+ if (jumpAnnotation) {
+ uint64_t timestamp = _pass->nextTimestamp();
+ for (uint32_t id : jumpAnnotation->labelIds()) {
+ LabelNode* labelNode;
+ ASMJIT_PROPAGATE(cc()->labelNodeOf(&labelNode, id));
+
+ RABlock* targetBlock = _pass->newBlockOrExistingAt(labelNode);
+ if (ASMJIT_UNLIKELY(!targetBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // Prevents adding basic-block successors multiple times.
+ if (!targetBlock->hasTimestamp(timestamp)) {
+ targetBlock->setTimestamp(timestamp);
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
+ }
+ }
+ ASMJIT_PROPAGATE(shareAssignmentAcrossSuccessors(_curBlock));
+ }
+ else {
+ ASMJIT_PROPAGATE(blocksWithUnknownJumps.append(_pass->allocator(), _curBlock));
+ }
+ }
+ }
+
+ if (controlType == BaseInst::kControlJump) {
+              // An unconditional jump makes the code after it unreachable;
+              // such code is removed instantly during CFG construction, as we
+              // cannot allocate registers for instructions that are not part
+              // of any block. We could leave these instructions as they are,
+              // but that would only postpone the problem, because assemblers
+              // can't encode instructions that use virtual registers.
+ _curBlock = nullptr;
+ }
+ else {
+ node = next;
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ RABlock* consecutiveBlock;
+ if (node->type() == BaseNode::kNodeLabel) {
+ if (node->hasPassData()) {
+ consecutiveBlock = node->passData<RABlock>();
+ }
+ else {
+ consecutiveBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutiveBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ node->setPassData<RABlock>(consecutiveBlock);
+ }
+ }
+ else {
+ consecutiveBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutiveBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
+
+ _curBlock = consecutiveBlock;
+ _hasCode = false;
+ _blockRegStats.reset();
+
+ if (_curBlock->isConstructed())
+ break;
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutiveBlock));
+
+ logBlock(_curBlock, kRootIndentation);
+ continue;
+ }
+ }
+
+ if (controlType == BaseInst::kControlReturn) {
+ _curBlock->setLast(node);
+ _curBlock->makeConstructed(_blockRegStats);
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
+
+ _curBlock = nullptr;
+ }
+ }
+ }
+ else if (node->type() == BaseNode::kNodeLabel) {
+ // Label - Basic-Block Management
+ // ------------------------------
+
+ if (!_curBlock) {
+ // If the current code is unreachable the label makes it reachable
+ // again. We may remove the whole block in the future if it's not
+ // referenced.
+ _curBlock = node->passData<RABlock>();
+
+ if (_curBlock) {
+ // If the label has a block assigned we can either continue with
+ // it or skip it if the block has been constructed already.
+ if (_curBlock->isConstructed())
+ break;
+ }
+ else {
+            // No block assigned - create a new one and assign it.
+ _curBlock = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!_curBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ node->setPassData<RABlock>(_curBlock);
+ }
+
+ _hasCode = false;
+ _blockRegStats.reset();
+ ASMJIT_PROPAGATE(_pass->addBlock(_curBlock));
+ }
+ else {
+ if (node->hasPassData()) {
+ RABlock* consecutive = node->passData<RABlock>();
+ if (_curBlock == consecutive) {
+ // The label currently processed is part of the current block. This
+ // is only possible for multiple labels that are right next to each
+ // other, or are separated by non-code nodes like directives and comments.
+ if (ASMJIT_UNLIKELY(_hasCode))
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+ else {
+              // The label makes the current block constructed. There is a
+              // chance that the label is not used, but we don't know that at
+              // this point. In the worst case there would be two blocks next
+              // to each other, which is fine.
+ ASMJIT_ASSERT(_curBlock->last() != node);
+ _curBlock->setLast(node->prev());
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
+
+ _curBlock = consecutive;
+ _hasCode = false;
+ _blockRegStats.reset();
+ }
+ }
+ else {
+ // First time we see this label.
+ if (_hasCode) {
+ // Cannot continue the current block if it already contains some
+ // code. We need to create a new block and make it a successor.
+ ASMJIT_ASSERT(_curBlock->last() != node);
+ _curBlock->setLast(node->prev());
+ _curBlock->addFlags(RABlock::kFlagHasConsecutive);
+ _curBlock->makeConstructed(_blockRegStats);
+
+ RABlock* consecutive = _pass->newBlock(node);
+ if (ASMJIT_UNLIKELY(!consecutive))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
+ ASMJIT_PROPAGATE(_pass->addBlock(consecutive));
+
+ _curBlock = consecutive;
+ _hasCode = false;
+ _blockRegStats.reset();
+ }
+
+ node->setPassData<RABlock>(_curBlock);
+ }
+ }
+
+ if (_curBlock && _curBlock != _lastLoggedBlock)
+ logBlock(_curBlock, kRootIndentation);
+ logNode(node, kRootIndentation);
+
+ // Unlikely: Assume that the exit label is reached only once per function.
+ if (ASMJIT_UNLIKELY(node->as<LabelNode>()->id() == _exitLabelId)) {
+ _curBlock->setLast(node);
+ _curBlock->makeConstructed(_blockRegStats);
+ ASMJIT_PROPAGATE(_pass->addExitBlock(_curBlock));
+
+ _curBlock = nullptr;
+ }
+ }
+ else {
+ // Other Nodes | Function Exit
+ // ---------------------------
+
+ logNode(node, kCodeIndentation);
+
+ if (node->type() == BaseNode::kNodeSentinel) {
+ if (node == _funcNode->endNode()) {
+ // Make sure we didn't flow here if this is the end of the function sentinel.
+ if (ASMJIT_UNLIKELY(_curBlock))
+ return DebugUtils::errored(kErrorInvalidState);
+ break;
+ }
+ }
+ else if (node->type() == BaseNode::kNodeFunc) {
+          // RAPass can only compile a single function at a time. If we
+          // encounter a function node it must be the current one; bail if not.
+ if (ASMJIT_UNLIKELY(node != _funcNode))
+ return DebugUtils::errored(kErrorInvalidState);
+ // PASS if this is the first node.
+ }
+ else {
+ // PASS if this is a non-interesting or unknown node.
+ }
+ }
+
+ // Advance to the next node.
+ node = next;
+
+      // NOTE: We cannot encounter a NULL node, because every function must be
+      // terminated by a sentinel (`stop`) node. Encountering NULL here means
+      // that something went wrong and the node list is corrupted, so bail.
+ if (ASMJIT_UNLIKELY(!node))
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ if (_pass->hasDanglingBlocks())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ for (RABlock* block : blocksWithUnknownJumps)
+      ASMJIT_PROPAGATE(handleBlockWithUnknownJump(block));
+
+ return _pass->initSharedAssignments(_sharedAssignmentsMap);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Prepare]
+ // --------------------------------------------------------------------------
+
+  //! Prepares the CFG builder to process the current function.
+ Error prepare() noexcept {
+ FuncNode* func = _pass->func();
+ BaseNode* node = nullptr;
+
+ // Create entry and exit blocks.
+ _funcNode = func;
+ _retBlock = _pass->newBlockOrExistingAt(func->exitNode(), &node);
+
+ if (ASMJIT_UNLIKELY(!_retBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ ASMJIT_PROPAGATE(_pass->addExitBlock(_retBlock));
+
+ if (node != func) {
+ _curBlock = _pass->newBlock();
+ if (ASMJIT_UNLIKELY(!_curBlock))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ }
+ else {
+ // Function that has no code at all.
+ _curBlock = _retBlock;
+ }
+
+ // Reset everything we may need.
+ _blockRegStats.reset();
+ _exitLabelId = func->exitNode()->id();
+
+ // Initially we assume there is no code in the function body.
+ _hasCode = false;
+
+ return _pass->addBlock(_curBlock);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Utilities]
+ // --------------------------------------------------------------------------
+
+  //! Called when a `node` is removed, e.g. because of dead code elimination.
+ void removeNode(BaseNode* node) noexcept {
+ logNode(node, kRootIndentation, "<Removed>");
+ cc()->removeNode(node);
+ }
+
+  //! Handles a block that ends with an unknown jump, e.g. a jump through a
+  //! jump table.
+  //!
+  //! If we encounter such a block we insert all existing blocks as its
+  //! successors, except the function entry block and the block's natural
+  //! successor (if it has one).
+ Error handleBlockWithUnknownJump(RABlock* block) noexcept {
+ RABlocks& blocks = _pass->blocks();
+ size_t blockCount = blocks.size();
+
+    // NOTE: Iterate from `1` as the first block is the entry block; we don't
+    // allow the entry to be a successor of a block that ends with an unknown jump.
+ RABlock* consecutive = block->consecutive();
+ for (size_t i = 1; i < blockCount; i++) {
+ RABlock* successor = blocks[i];
+ if (successor == consecutive)
+ continue;
+      ASMJIT_PROPAGATE(block->appendSuccessor(successor));
+ }
+
+ return shareAssignmentAcrossSuccessors(block);
+ }
+
+ Error shareAssignmentAcrossSuccessors(RABlock* block) noexcept {
+ if (block->successors().size() <= 1)
+ return kErrorOk;
+
+ RABlock* consecutive = block->consecutive();
+ uint32_t sharedAssignmentId = Globals::kInvalidId;
+
+ for (RABlock* successor : block->successors()) {
+ if (successor == consecutive)
+ continue;
+
+ if (successor->hasSharedAssignmentId()) {
+ if (sharedAssignmentId == Globals::kInvalidId)
+ sharedAssignmentId = successor->sharedAssignmentId();
+ else
+ _sharedAssignmentsMap[successor->sharedAssignmentId()] = sharedAssignmentId;
+ }
+ else {
+ if (sharedAssignmentId == Globals::kInvalidId)
+ ASMJIT_PROPAGATE(newSharedAssignmentId(&sharedAssignmentId));
+ successor->setSharedAssignmentId(sharedAssignmentId);
+ }
+ }
+ return kErrorOk;
+ }
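+
+  // Example (illustrative): a jump whose successors are {B1, B2, B3}, with B1
+  // being the natural consecutive block, assigns one shared id to B2 and B3
+  // (the consecutive block is skipped); successors that already carry an id
+  // get their ids merged through `_sharedAssignmentsMap`.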
+
+ Error newSharedAssignmentId(uint32_t* out) noexcept {
+ uint32_t id = _sharedAssignmentsMap.size();
+ ASMJIT_PROPAGATE(_sharedAssignmentsMap.append(_pass->allocator(), id));
+
+ *out = id;
+ return kErrorOk;
+ }
+
+ // --------------------------------------------------------------------------
+ // [Logging]
+ // --------------------------------------------------------------------------
+
+#ifndef ASMJIT_NO_LOGGING
+ template<typename... Args>
+ inline void log(const char* fmt, Args&&... args) noexcept {
+ if (_logger)
+ _logger->logf(fmt, std::forward<Args>(args)...);
+ }
+
+ inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
+ if (_logger)
+ _logBlock(block, indentation);
+ }
+
+ inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
+ if (_logger)
+ _logNode(node, indentation, action);
+ }
+
+ void _logBlock(RABlock* block, uint32_t indentation) noexcept {
+ _sb.clear();
+ _sb.appendChars(' ', indentation);
+ _sb.appendFormat("{#%u}\n", block->blockId());
+ _logger->log(_sb);
+ _lastLoggedBlock = block;
+ }
+
+ void _logNode(BaseNode* node, uint32_t indentation, const char* action) noexcept {
+ _sb.clear();
+ _sb.appendChars(' ', indentation);
+ if (action) {
+ _sb.appendString(action);
+ _sb.appendChar(' ');
+ }
+ Logging::formatNode(_sb, _logFlags, cc(), node);
+ _sb.appendChar('\n');
+ _logger->log(_sb);
+ }
+#else
+ template<typename... Args>
+ inline void log(const char* fmt, Args&&... args) noexcept {
+ DebugUtils::unused(fmt);
+ DebugUtils::unused(std::forward<Args>(args)...);
+ }
+
+ inline void logBlock(RABlock* block, uint32_t indentation = 0) noexcept {
+ DebugUtils::unused(block, indentation);
+ }
+
+ inline void logNode(BaseNode* node, uint32_t indentation = 0, const char* action = nullptr) noexcept {
+ DebugUtils::unused(node, indentation, action);
+ }
+#endif
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/radefs_p.h b/3rdparty/asmjit/src/asmjit/core/radefs_p.h
new file mode 100644
index 00000000000..c63a1a33966
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/radefs_p.h
@@ -0,0 +1,1094 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RADEFS_P_H_INCLUDED
+#define ASMJIT_CORE_RADEFS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/compiler.h"
+#include "../core/logging.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+# define ASMJIT_RA_LOG_FORMAT(...) \
+ do { \
+ if (logger) \
+ logger->logf(__VA_ARGS__); \
+ } while (0)
+# define ASMJIT_RA_LOG_COMPLEX(...) \
+ do { \
+ if (logger) { \
+ __VA_ARGS__ \
+ } \
+ } while (0)
+#else
+# define ASMJIT_RA_LOG_FORMAT(...) ((void)0)
+# define ASMJIT_RA_LOG_COMPLEX(...) ((void)0)
+#endif
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class RAPass;
+class RABlock;
+struct RAStackSlot;
+
+typedef ZoneVector<RABlock*> RABlocks;
+typedef ZoneVector<RAWorkReg*> RAWorkRegs;
+
+// ============================================================================
+// [asmjit::RAStrategy]
+// ============================================================================
+
+struct RAStrategy {
+ uint8_t _type;
+
+ enum StrategyType : uint32_t {
+ kStrategySimple = 0,
+ kStrategyComplex = 1
+ };
+
+ inline RAStrategy() noexcept { reset(); }
+ inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+
+ inline uint32_t type() const noexcept { return _type; }
+ inline void setType(uint32_t type) noexcept { _type = uint8_t(type); }
+
+ inline bool isSimple() const noexcept { return _type == kStrategySimple; }
+ inline bool isComplex() const noexcept { return _type >= kStrategyComplex; }
+};
+
+// ============================================================================
+// [asmjit::RAArchTraits]
+// ============================================================================
+
+//! Architecture traits used by the register allocator, per register group.
+struct RAArchTraits {
+ enum Flags : uint32_t {
+ //! Registers can be swapped by a single instruction.
+ kHasSwap = 0x01u
+ };
+
+ uint8_t _flags[BaseReg::kGroupVirt];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RAArchTraits() noexcept { reset(); }
+ inline void reset() noexcept { memset(_flags, 0, sizeof(_flags)); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool hasFlag(uint32_t group, uint32_t flag) const noexcept { return (_flags[group] & flag) != 0; }
+ inline bool hasSwap(uint32_t group) const noexcept { return hasFlag(group, kHasSwap); }
+
+ inline uint8_t& operator[](uint32_t group) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _flags[group];
+ }
+
+ inline const uint8_t& operator[](uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return _flags[group];
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegCount]
+// ============================================================================
+
+//! Count of virtual or physical registers per group.
+//!
+//! \note This class uses 8-bit integers to represent counters; it's only used
+//! in places where this is sufficient - for example the total count of the
+//! machine's physical registers, the count of virtual registers per
+//! instruction, etc. There is also `RALiveCount`, which uses 32-bit integers
+//! and is much safer.
+struct RARegCount {
+ union {
+ uint8_t _regs[4];
+ uint32_t _packed;
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Resets all counters to zero.
+ inline void reset() noexcept { _packed = 0; }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline uint8_t& operator[](uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+ return _regs[index];
+ }
+
+ inline const uint8_t& operator[](uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+ return _regs[index];
+ }
+
+ inline RARegCount& operator=(const RARegCount& other) noexcept = default;
+
+ inline bool operator==(const RARegCount& other) const noexcept { return _packed == other._packed; }
+ inline bool operator!=(const RARegCount& other) const noexcept { return _packed != other._packed; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Returns the count of registers by the given register `group`.
+ inline uint32_t get(uint32_t group) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ uint32_t shift = Support::byteShiftOfDWordStruct(group);
+ return (_packed >> shift) & uint32_t(0xFF);
+ }
+
+ //! Sets the register count by a register `group`.
+ inline void set(uint32_t group, uint32_t n) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ ASMJIT_ASSERT(n <= 0xFF);
+
+ uint32_t shift = Support::byteShiftOfDWordStruct(group);
+    _packed = (_packed & ~(uint32_t(0xFF) << shift)) + (n << shift);
+ }
+
+ //! Adds the register count by a register `group`.
+ inline void add(uint32_t group, uint32_t n = 1) noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ ASMJIT_ASSERT(0xFF - uint32_t(_regs[group]) >= n);
+
+ uint32_t shift = Support::byteShiftOfDWordStruct(group);
+ _packed += n << shift;
+ }
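+
+  // Worked example (illustrative, assuming little-endian byte order of
+  // `_packed`): with `_packed == 0x04030201` the groups hold {1, 2, 3, 4},
+  // `get(2) == 3`, and `set(2, 5)` yields `_packed == 0x04050201`.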
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegIndex]
+// ============================================================================
+
+struct RARegIndex : public RARegCount {
+ //! Build register indexes based on the given `count` of registers.
+ inline void buildIndexes(const RARegCount& count) noexcept {
+ uint32_t x = uint32_t(count._regs[0]);
+ uint32_t y = uint32_t(count._regs[1]) + x;
+ uint32_t z = uint32_t(count._regs[2]) + y;
+
+ ASMJIT_ASSERT(y <= 0xFF);
+ ASMJIT_ASSERT(z <= 0xFF);
+ _packed = Support::bytepack32_4x8(0, x, y, z);
+ }
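+
+  // Example (illustrative): counts {4, 2, 3, 1} produce indexes {0, 4, 6, 9},
+  // i.e. each group's index is the sum of the counts of all preceding groups.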
+};
+
+// ============================================================================
+// [asmjit::RARegMask]
+// ============================================================================
+
+//! Registers mask.
+struct RARegMask {
+ uint32_t _masks[BaseReg::kGroupVirt];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline void init(const RARegMask& other) noexcept {
+ for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+ _masks[i] = other._masks[i];
+ }
+
+ //! Reset all register masks to zero.
+ inline void reset() noexcept {
+ for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+ _masks[i] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RARegMask& operator=(const RARegMask& other) noexcept = default;
+
+ inline bool operator==(const RARegMask& other) const noexcept {
+ return _masks[0] == other._masks[0] &&
+ _masks[1] == other._masks[1] &&
+ _masks[2] == other._masks[2] &&
+           _masks[3] == other._masks[3];
+ }
+
+ inline bool operator!=(const RARegMask& other) const noexcept {
+ return !operator==(other);
+ }
+
+ inline uint32_t& operator[](uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+ return _masks[index];
+ }
+
+ inline const uint32_t& operator[](uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < BaseReg::kGroupVirt);
+ return _masks[index];
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Tests whether all register masks are zero (empty).
+ inline bool empty() const noexcept {
+ uint32_t m = 0;
+ for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+ m |= _masks[i];
+ return m == 0;
+ }
+
+ inline bool has(uint32_t group, uint32_t mask = 0xFFFFFFFFu) const noexcept {
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+ return (_masks[group] & mask) != 0;
+ }
+
+ template<class Operator>
+ inline void op(const RARegMask& other) noexcept {
+ for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++)
+ _masks[i] = Operator::op(_masks[i], other._masks[i]);
+ }
+
+ template<class Operator>
+ inline void op(uint32_t group, uint32_t input) noexcept {
+ _masks[group] = Operator::op(_masks[group], input);
+ }
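+
+  // Example (illustrative; assumes the operator helpers in `Support`, e.g.
+  // `Support::Or`): `mask.op<Support::Or>(other)` unions two masks in place.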
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RARegsStats]
+// ============================================================================
+
+//! Information associated with each instruction, propagated to blocks, loops,
+//! and the whole function. This information can be used to make quick decisions
+//! before the register allocator tries to do its job. For example, fast
+//! register allocation can only be used inside a block or loop that has no
+//! clobbered and/or fixed registers, etc...
+struct RARegsStats {
+ uint32_t _packed;
+
+ enum Index : uint32_t {
+ kIndexUsed = 0,
+ kIndexFixed = 8,
+ kIndexClobbered = 16
+ };
+
+ enum Mask : uint32_t {
+ kMaskUsed = 0xFFu << kIndexUsed,
+ kMaskFixed = 0xFFu << kIndexFixed,
+ kMaskClobbered = 0xFFu << kIndexClobbered
+ };
+
+ inline void reset() noexcept { _packed = 0; }
+ inline void combineWith(const RARegsStats& other) noexcept { _packed |= other._packed; }
+
+ inline bool hasUsed() const noexcept { return (_packed & kMaskUsed) != 0u; }
+ inline bool hasUsed(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexUsed + group)) != 0u; }
+ inline void makeUsed(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexUsed + group); }
+
+ inline bool hasFixed() const noexcept { return (_packed & kMaskFixed) != 0u; }
+ inline bool hasFixed(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexFixed + group)) != 0u; }
+ inline void makeFixed(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexFixed + group); }
+
+ inline bool hasClobbered() const noexcept { return (_packed & kMaskClobbered) != 0u; }
+ inline bool hasClobbered(uint32_t group) const noexcept { return (_packed & Support::bitMask(kIndexClobbered + group)) != 0u; }
+ inline void makeClobbered(uint32_t group) noexcept { _packed |= Support::bitMask(kIndexClobbered + group); }
+};
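+
+// Example (illustrative): `makeFixed(BaseReg::kGroupGp)` sets bit
+// `kIndexFixed + BaseReg::kGroupGp`, so both `hasFixed()` and
+// `hasFixed(BaseReg::kGroupGp)` report true even after `combineWith()`
+// merges these stats into a block or loop.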
+
+// ============================================================================
+// [asmjit::RALiveCount]
+// ============================================================================
+
+//! Count of live registers, per group.
+class RALiveCount {
+public:
+ uint32_t n[BaseReg::kGroupVirt];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALiveCount() noexcept { reset(); }
+ inline RALiveCount(const RALiveCount& other) noexcept = default;
+
+ inline void init(const RALiveCount& other) noexcept {
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ n[group] = other.n[group];
+ }
+
+ inline void reset() noexcept {
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ n[group] = 0;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RALiveCount& operator=(const RALiveCount& other) noexcept = default;
+
+ inline uint32_t& operator[](uint32_t group) noexcept { return n[group]; }
+ inline const uint32_t& operator[](uint32_t group) const noexcept { return n[group]; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ template<class Operator>
+ inline void op(const RALiveCount& other) noexcept {
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ n[group] = Operator::op(n[group], other.n[group]);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveInterval]
+// ============================================================================
+
+struct RALiveInterval {
+ uint32_t a, b;
+
+ enum Misc : uint32_t {
+ kNaN = 0,
+ kInf = 0xFFFFFFFFu
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALiveInterval() noexcept : a(0), b(0) {}
+ inline RALiveInterval(uint32_t a, uint32_t b) noexcept : a(a), b(b) {}
+ inline RALiveInterval(const RALiveInterval& other) noexcept : a(other.a), b(other.b) {}
+
+ inline void init(uint32_t aVal, uint32_t bVal) noexcept {
+ a = aVal;
+ b = bVal;
+ }
+ inline void init(const RALiveInterval& other) noexcept { init(other.a, other.b); }
+ inline void reset() noexcept { init(0, 0); }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RALiveInterval& operator=(const RALiveInterval& other) = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool isValid() const noexcept { return a < b; }
+ inline uint32_t width() const noexcept { return b - a; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveSpan<T>]
+// ============================================================================
+
+template<typename T>
+class RALiveSpan : public RALiveInterval, public T {
+public:
+ typedef T DataType;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALiveSpan() noexcept : RALiveInterval(), T() {}
+  inline RALiveSpan(const RALiveSpan<T>& other) noexcept : RALiveInterval(other), T(other) {}
+ inline RALiveSpan(const RALiveInterval& interval, const T& data) noexcept : RALiveInterval(interval), T(data) {}
+ inline RALiveSpan(uint32_t a, uint32_t b) noexcept : RALiveInterval(a, b), T() {}
+ inline RALiveSpan(uint32_t a, uint32_t b, const T& data) noexcept : RALiveInterval(a, b), T(data) {}
+
+ inline void init(const RALiveSpan<T>& other) noexcept {
+ RALiveInterval::init(static_cast<const RALiveInterval&>(other));
+ T::init(static_cast<const T&>(other));
+ }
+
+ inline void init(const RALiveSpan<T>& span, const T& data) noexcept {
+ RALiveInterval::init(static_cast<const RALiveInterval&>(span));
+ T::init(data);
+ }
+
+ inline void init(const RALiveInterval& interval, const T& data) noexcept {
+ RALiveInterval::init(interval);
+ T::init(data);
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RALiveSpan& operator=(const RALiveSpan& other) {
+ init(other);
+ return *this;
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveSpans<T>]
+// ============================================================================
+
+template<typename T>
+class RALiveSpans {
+public:
+ ASMJIT_NONCOPYABLE(RALiveSpans<T>)
+
+ typedef typename T::DataType DataType;
+ ZoneVector<T> _data;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALiveSpans() noexcept : _data() {}
+
+ inline void reset() noexcept { _data.reset(); }
+ inline void release(ZoneAllocator* allocator) noexcept { _data.release(allocator); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _data.empty(); }
+ inline uint32_t size() const noexcept { return _data.size(); }
+
+ inline T* data() noexcept { return _data.data(); }
+ inline const T* data() const noexcept { return _data.data(); }
+
+ inline bool isOpen() const noexcept {
+ uint32_t size = _data.size();
+ return size > 0 && _data[size - 1].b == RALiveInterval::kInf;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(RALiveSpans<T>& other) noexcept { _data.swap(other._data); }
+
+  //! Opens a live span `[start, end)`, or extends the last span if it connects.
+ ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end) noexcept {
+ bool wasOpen;
+ return openAt(allocator, start, end, wasOpen);
+ }
+
+ ASMJIT_INLINE Error openAt(ZoneAllocator* allocator, uint32_t start, uint32_t end, bool& wasOpen) noexcept {
+ uint32_t size = _data.size();
+ wasOpen = false;
+
+ if (size > 0) {
+ T& last = _data[size - 1];
+ if (last.b >= start) {
+ wasOpen = last.b > start;
+ last.b = end;
+ return kErrorOk;
+ }
+ }
+
+ return _data.append(allocator, T(start, end));
+ }
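+
+  // Example (illustrative): with spans {[1, 5)}, `openAt(allocator, 4, 9)`
+  // extends the last span to [1, 9) and reports `wasOpen == true`, while
+  // `openAt(allocator, 6, 9)` appends a new span [6, 9).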
+
+ inline void closeAt(uint32_t end) noexcept {
+ ASMJIT_ASSERT(!empty());
+
+ uint32_t size = _data.size();
+ _data[size - 1].b = end;
+ }
+
+ //! Returns the sum of width of all spans.
+ //!
+  //! \note Don't overuse; this iterates over all spans, so it's O(N).
+  //! It should be called only once and the result cached.
+ ASMJIT_INLINE uint32_t width() const noexcept {
+ uint32_t width = 0;
+ for (const T& span : _data)
+ width += span.width();
+ return width;
+ }
+
+ inline T& operator[](uint32_t index) noexcept { return _data[index]; }
+ inline const T& operator[](uint32_t index) const noexcept { return _data[index]; }
+
+ inline bool intersects(const RALiveSpans<T>& other) const noexcept {
+ return intersects(*this, other);
+ }
+
+ ASMJIT_INLINE Error nonOverlappingUnionOf(ZoneAllocator* allocator, const RALiveSpans<T>& x, const RALiveSpans<T>& y, const DataType& yData) noexcept {
+ uint32_t finalSize = x.size() + y.size();
+ ASMJIT_PROPAGATE(_data.reserve(allocator, finalSize));
+
+ T* dstPtr = _data.data();
+ const T* xSpan = x.data();
+ const T* ySpan = y.data();
+
+ const T* xEnd = xSpan + x.size();
+ const T* yEnd = ySpan + y.size();
+
+ // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`,
+ // which means that there is no intersection. We advance either `xSpan` or `ySpan`
+ // depending on their ranges.
+ if (xSpan != xEnd && ySpan != yEnd) {
+ uint32_t xa, ya;
+ xa = xSpan->a;
+ for (;;) {
+ while (ySpan->b <= xa) {
+ dstPtr->init(*ySpan, yData);
+ dstPtr++;
+ if (++ySpan == yEnd)
+ goto Done;
+ }
+
+ ya = ySpan->a;
+ while (xSpan->b <= ya) {
+ *dstPtr++ = *xSpan;
+ if (++xSpan == xEnd)
+ goto Done;
+ }
+
+ // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
+ xa = xSpan->a;
+ if (ySpan->b > xa)
+ return 0xFFFFFFFFu;
+ }
+ }
+
+ Done:
+ while (xSpan != xEnd) {
+ *dstPtr++ = *xSpan++;
+ }
+
+ while (ySpan != yEnd) {
+ dstPtr->init(*ySpan, yData);
+ dstPtr++;
+ ySpan++;
+ }
+
+ _data._setEndPtr(dstPtr);
+ return kErrorOk;
+ }
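+
+  // Example (illustrative): for x = {[0, 2), [5, 7)} and y = {[2, 5)} the
+  // result is {[0, 2), [2, 5), [5, 7)}, with `yData` attached to the spans
+  // copied from `y`. Overlapping inputs such as x = {[0, 4)} and y = {[2, 6)}
+  // make the function return the 0xFFFFFFFFu sentinel instead of kErrorOk.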
+
+ static ASMJIT_INLINE bool intersects(const RALiveSpans<T>& x, const RALiveSpans<T>& y) noexcept {
+ const T* xSpan = x.data();
+ const T* ySpan = y.data();
+
+ const T* xEnd = xSpan + x.size();
+ const T* yEnd = ySpan + y.size();
+
+ // Loop until we have intersection or either `xSpan == xEnd` or `ySpan == yEnd`,
+ // which means that there is no intersection. We advance either `xSpan` or `ySpan`
+ // depending on their end positions.
+ if (xSpan == xEnd || ySpan == yEnd)
+ return false;
+
+ uint32_t xa, ya;
+ xa = xSpan->a;
+
+ for (;;) {
+ while (ySpan->b <= xa)
+ if (++ySpan == yEnd)
+ return false;
+
+ ya = ySpan->a;
+ while (xSpan->b <= ya)
+ if (++xSpan == xEnd)
+ return false;
+
+ // We know that `xSpan->b > ySpan->a`, so check if `ySpan->b > xSpan->a`.
+ xa = xSpan->a;
+ if (ySpan->b > xa)
+ return true;
+ }
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RALiveStats]
+// ============================================================================
+
+//! Statistics about a register liveness.
+class RALiveStats {
+public:
+ uint32_t _width;
+ float _freq;
+ float _priority;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALiveStats()
+ : _width(0),
+ _freq(0.0f),
+ _priority(0.0f) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t width() const noexcept { return _width; }
+ inline float freq() const noexcept { return _freq; }
+ inline float priority() const noexcept { return _priority; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::LiveRegData]
+// ============================================================================
+
+struct LiveRegData {
+ uint32_t id;
+
+ inline explicit LiveRegData(uint32_t id = BaseReg::kIdBad) noexcept : id(id) {}
+ inline LiveRegData(const LiveRegData& other) noexcept : id(other.id) {}
+
+ inline void init(const LiveRegData& other) noexcept { id = other.id; }
+
+ inline bool operator==(const LiveRegData& other) const noexcept { return id == other.id; }
+ inline bool operator!=(const LiveRegData& other) const noexcept { return id != other.id; }
+};
+
+typedef RALiveSpan<LiveRegData> LiveRegSpan;
+typedef RALiveSpans<LiveRegSpan> LiveRegSpans;
+
+// ============================================================================
+// [asmjit::RATiedReg]
+// ============================================================================
+
+//! A tied register merges one or more register operands into a single entity.
+//! It contains information about its access (Read|Write) and allocation slots
+//! (Use|Out) that are used by the register allocator and liveness analysis.
+struct RATiedReg {
+ //! WorkReg id.
+ uint32_t _workId;
+ //! Allocation flags.
+ uint32_t _flags;
+ //! Registers where input {R|X} can be allocated to.
+ uint32_t _allocableRegs;
+ //! Indexes used to rewrite USE regs.
+ uint32_t _useRewriteMask;
+ //! Indexes used to rewrite OUT regs.
+ uint32_t _outRewriteMask;
+
+ union {
+ struct {
+ //! How many times the VirtReg is referenced in all operands.
+ uint8_t _refCount;
+ //! Physical register for use operation (ReadOnly / ReadWrite).
+ uint8_t _useId;
+ //! Physical register for out operation (WriteOnly).
+ uint8_t _outId;
+      //! Size of a memory operand when the register can be patched to memory (RM).
+ uint8_t _rmSize;
+ };
+ //! Packed data.
+ uint32_t _packed;
+ };
+
+ //! Flags.
+ //!
+ //! Register access information is encoded in 4 flags in total:
+ //!
+ //! - `kRead` - Register is Read (ReadWrite if combined with `kWrite`).
+ //! - `kWrite` - Register is Written (ReadWrite if combined with `kRead`).
+ //! - `kUse` - Encoded as Read or ReadWrite.
+ //! - `kOut` - Encoded as WriteOnly.
+ //!
+ //! Let's describe all of these on two X86 instructions:
+ //!
+  //!   - ADD x{R|W|Use}, x{R|Use} -> {x:R|W|Use }
+  //!   - LEA x{ W|Out}, [x{R|Use} + x{R|Use}] -> {x:R|W|Use|Out }
+  //!   - ADD x{R|W|Use}, y{R|Use} -> {x:R|W|Use y:R|Use}
+  //!   - LEA x{ W|Out}, [x{R|Use} + y{R|Use}] -> {x:R|W|Use|Out y:R|Use}
+ //!
+ //! It should be obvious from the example above how these flags get created.
+ //! Each operand contains READ/WRITE information, which is then merged to
+  //! RATiedReg's flags. However, we also need to represent the possibility to
+  //! see the operation as two independent operations - USE and OUT, because
+ //! the register allocator will first allocate USE registers, and then assign
+ //! OUT registers independently of USE registers.
+ enum Flags : uint32_t {
+ kRead = OpRWInfo::kRead, //!< Register is read.
+ kWrite = OpRWInfo::kWrite, //!< Register is written.
+ kRW = OpRWInfo::kRW, //!< Register both read and written.
+
+ kUse = 0x00000100u, //!< Register has a USE slot (read/rw).
+ kOut = 0x00000200u, //!< Register has an OUT slot (write-only).
+ kUseRM = 0x00000400u, //!< Register in USE slot can be patched to memory.
+ kOutRM = 0x00000800u, //!< Register in OUT slot can be patched to memory.
+
+ kUseFixed = 0x00001000u, //!< Register has a fixed USE slot.
+ kOutFixed = 0x00002000u, //!< Register has a fixed OUT slot.
+ kUseDone = 0x00004000u, //!< Register USE slot has been allocated.
+ kOutDone = 0x00008000u, //!< Register OUT slot has been allocated.
+
+ kDuplicate = 0x00010000u, //!< Register must be duplicated (function call only).
+ kLast = 0x00020000u, //!< Last occurrence of this VirtReg in basic block.
+ kKill = 0x00040000u, //!< Kill this VirtReg after use.
+
+ // Architecture specific flags are used during RATiedReg building to ensure
+ // that architecture-specific constraints are handled properly. These flags
+ // are not really needed after RATiedReg[] is built and copied to `RAInst`.
+
+ kX86Gpb = 0x01000000u //!< This RATiedReg references GPB-LO or GPB-HI.
+ };
+
+ static_assert(kRead == 0x1, "RATiedReg::kRead flag must be 0x1");
+ static_assert(kWrite == 0x2, "RATiedReg::kWrite flag must be 0x2");
+ static_assert(kRW == 0x3, "RATiedReg::kRW combination must be 0x3");
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE void init(uint32_t workId, uint32_t flags, uint32_t allocableRegs, uint32_t useId, uint32_t useRewriteMask, uint32_t outId, uint32_t outRewriteMask, uint32_t rmSize = 0) noexcept {
+ _workId = workId;
+ _flags = flags;
+ _allocableRegs = allocableRegs;
+ _useRewriteMask = useRewriteMask;
+ _outRewriteMask = outRewriteMask;
+ _refCount = 1;
+ _useId = uint8_t(useId);
+ _outId = uint8_t(outId);
+ _rmSize = uint8_t(rmSize);
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline RATiedReg& operator=(const RATiedReg& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the associated WorkReg id.
+ inline uint32_t workId() const noexcept { return _workId; }
+
+ //! Checks if the given `flag` is set, see `Flags`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ //! Returns TiedReg flags, see `RATiedReg::Flags`.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Adds tied register flags, see `Flags`.
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+
+  //! Tests whether the register is read (returns `true` also if it's Read/Write).
+  inline bool isRead() const noexcept { return hasFlag(kRead); }
+  //! Tests whether the register is written (returns `true` also if it's Read/Write).
+  inline bool isWrite() const noexcept { return hasFlag(kWrite); }
+ //! Tests whether the register is read only.
+ inline bool isReadOnly() const noexcept { return (_flags & kRW) == kRead; }
+ //! Tests whether the register is write only.
+ inline bool isWriteOnly() const noexcept { return (_flags & kRW) == kWrite; }
+ //! Tests whether the register is read and written.
+ inline bool isReadWrite() const noexcept { return (_flags & kRW) == kRW; }
+
+ //! Tests whether the tied register has use operand (Read/ReadWrite).
+ inline bool isUse() const noexcept { return hasFlag(kUse); }
+ //! Tests whether the tied register has out operand (Write).
+ inline bool isOut() const noexcept { return hasFlag(kOut); }
+
+ //! Tests whether the USE slot can be patched to memory operand.
+ inline bool hasUseRM() const noexcept { return hasFlag(kUseRM); }
+ //! Tests whether the OUT slot can be patched to memory operand.
+ inline bool hasOutRM() const noexcept { return hasFlag(kOutRM); }
+
+ inline uint32_t rmSize() const noexcept { return _rmSize; }
+
+ inline void makeReadOnly() noexcept {
+ _flags = (_flags & ~(kOut | kWrite)) | kUse;
+ _useRewriteMask |= _outRewriteMask;
+ _outRewriteMask = 0;
+ }
+
+ inline void makeWriteOnly() noexcept {
+ _flags = (_flags & ~(kUse | kRead)) | kOut;
+ _outRewriteMask |= _useRewriteMask;
+ _useRewriteMask = 0;
+ }
+
+ //! Tests whether the register would duplicate.
+ inline bool isDuplicate() const noexcept { return hasFlag(kDuplicate); }
+
+ //! Tests whether the register (and the instruction it's part of) appears last in the basic block.
+ inline bool isLast() const noexcept { return hasFlag(kLast); }
+ //! Tests whether the register should be killed after USEd and/or OUTed.
+ inline bool isKill() const noexcept { return hasFlag(kKill); }
+
+ //! Tests whether the register is OUT or KILL (used internally by local register allocator).
+ inline bool isOutOrKill() const noexcept { return hasFlag(kOut | kKill); }
+
+ inline uint32_t allocableRegs() const noexcept { return _allocableRegs; }
+
+ inline uint32_t refCount() const noexcept { return _refCount; }
+ inline void addRefCount(uint32_t n = 1) noexcept { _refCount = uint8_t(_refCount + n); }
+
+ //! Tests whether the register must be allocated to a fixed physical register before it's used.
+ inline bool hasUseId() const noexcept { return _useId != BaseReg::kIdBad; }
+ //! Tests whether the register must be allocated to a fixed physical register before it's written.
+ inline bool hasOutId() const noexcept { return _outId != BaseReg::kIdBad; }
+
+ //! Returns a physical register id used for 'use' operation.
+ inline uint32_t useId() const noexcept { return _useId; }
+ //! Returns a physical register id used for 'out' operation.
+ inline uint32_t outId() const noexcept { return _outId; }
+
+ inline uint32_t useRewriteMask() const noexcept { return _useRewriteMask; }
+ inline uint32_t outRewriteMask() const noexcept { return _outRewriteMask; }
+
+ //! Sets a physical register used for 'use' operation.
+ inline void setUseId(uint32_t index) noexcept { _useId = uint8_t(index); }
+ //! Sets a physical register used for 'out' operation.
+ inline void setOutId(uint32_t index) noexcept { _outId = uint8_t(index); }
+
+ inline bool isUseDone() const noexcept { return hasFlag(kUseDone); }
+  inline bool isOutDone() const noexcept { return hasFlag(kOutDone); }
+
+ inline void markUseDone() noexcept { addFlags(kUseDone); }
+  inline void markOutDone() noexcept { addFlags(kOutDone); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::RAWorkReg]
+// ============================================================================
+
+class RAWorkReg {
+public:
+ ASMJIT_NONCOPYABLE(RAWorkReg)
+
+ //! RAPass specific ID used during analysis and allocation.
+ uint32_t _workId;
+ //! Copy of ID used by `VirtReg`.
+ uint32_t _virtId;
+
+ //! Permanent association with `VirtReg`.
+ VirtReg* _virtReg;
+ //! Temporary association with `RATiedReg`.
+ RATiedReg* _tiedReg;
+ //! Stack slot associated with the register.
+ RAStackSlot* _stackSlot;
+
+ //! Copy of a signature used by `VirtReg`.
+ RegInfo _info;
+ //! RAPass specific flags used during analysis and allocation.
+ uint32_t _flags;
+ //! Mask of all physical registers this WorkReg has been allocated to.
+ uint32_t _allocatedMask;
+ //! Mask of all physical registers that are clobbered during the lifetime
+ //! of this WorkReg.
+ //!
+ //! This mask should be updated by `RAPass::buildLiveness()`, because it's
+ //! global and should be updated after unreachable code has been removed.
+ uint32_t _clobberSurvivalMask;
+
+ //! A byte-mask where each bit represents one valid byte of the register.
+ uint64_t _regByteMask;
+
+ //! Argument index (or `kNoArgIndex` if none).
+ uint8_t _argIndex;
+ //! Global home register ID (if any, assigned by RA).
+ uint8_t _homeRegId;
+ //! Global hint register ID (provided by RA or user).
+ uint8_t _hintRegId;
+
+ //! Live spans of the `VirtReg`.
+ LiveRegSpans _liveSpans;
+ //! Live statistics.
+ RALiveStats _liveStats;
+
+ //! All nodes that read/write this VirtReg/WorkReg.
+ ZoneVector<BaseNode*> _refs;
+ //! All nodes that write to this VirtReg/WorkReg.
+ ZoneVector<BaseNode*> _writes;
+
+ enum Ids : uint32_t {
+ kIdNone = 0xFFFFFFFFu
+ };
+
+ enum Flags : uint32_t {
+ //! Has been coalesced to another WorkReg.
+ kFlagCoalesced = 0x00000001u,
+ //! Stack slot has to be allocated.
+ kFlagStackUsed = 0x00000002u,
+ //! Stack allocation is preferred.
+ kFlagStackPreferred = 0x00000004u,
+ //! Marked for stack argument reassignment.
+ kFlagStackArgToStack = 0x00000008u,
+
+ // TODO: Used?
+ kFlagDirtyStats = 0x80000000u
+ };
+
+ enum ArgIndex : uint32_t {
+ kNoArgIndex = 0xFFu
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE RAWorkReg(VirtReg* vReg, uint32_t workId) noexcept
+ : _workId(workId),
+ _virtId(vReg->id()),
+ _virtReg(vReg),
+ _tiedReg(nullptr),
+ _stackSlot(nullptr),
+ _info(vReg->info()),
+ _flags(kFlagDirtyStats),
+ _allocatedMask(0),
+ _clobberSurvivalMask(0),
+ _regByteMask(0),
+ _argIndex(kNoArgIndex),
+ _homeRegId(BaseReg::kIdBad),
+ _hintRegId(BaseReg::kIdBad),
+ _liveSpans(),
+ _liveStats(),
+ _refs() {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t workId() const noexcept { return _workId; }
+ inline uint32_t virtId() const noexcept { return _virtId; }
+
+ inline const char* name() const noexcept { return _virtReg->name(); }
+ inline uint32_t nameSize() const noexcept { return _virtReg->nameSize(); }
+
+ inline uint32_t typeId() const noexcept { return _virtReg->typeId(); }
+
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+
+ inline bool isStackUsed() const noexcept { return hasFlag(kFlagStackUsed); }
+ inline void markStackUsed() noexcept { addFlags(kFlagStackUsed); }
+
+ inline bool isStackPreferred() const noexcept { return hasFlag(kFlagStackPreferred); }
+ inline void markStackPreferred() noexcept { addFlags(kFlagStackPreferred); }
+
+ //! Tests whether this RAWorkReg has been coalesced with another one (cannot be used anymore).
+ inline bool isCoalesced() const noexcept { return hasFlag(kFlagCoalesced); }
+
+ inline const RegInfo& info() const noexcept { return _info; }
+ inline uint32_t group() const noexcept { return _info.group(); }
+ inline uint32_t signature() const noexcept { return _info.signature(); }
+
+ inline VirtReg* virtReg() const noexcept { return _virtReg; }
+
+ inline bool hasTiedReg() const noexcept { return _tiedReg != nullptr; }
+ inline RATiedReg* tiedReg() const noexcept { return _tiedReg; }
+ inline void setTiedReg(RATiedReg* tiedReg) noexcept { _tiedReg = tiedReg; }
+ inline void resetTiedReg() noexcept { _tiedReg = nullptr; }
+
+ inline bool hasStackSlot() const noexcept { return _stackSlot != nullptr; }
+ inline RAStackSlot* stackSlot() const noexcept { return _stackSlot; }
+
+ inline LiveRegSpans& liveSpans() noexcept { return _liveSpans; }
+ inline const LiveRegSpans& liveSpans() const noexcept { return _liveSpans; }
+
+ inline RALiveStats& liveStats() noexcept { return _liveStats; }
+ inline const RALiveStats& liveStats() const noexcept { return _liveStats; }
+
+ inline bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; }
+ inline uint32_t argIndex() const noexcept { return _argIndex; }
+ inline void setArgIndex(uint32_t index) noexcept { _argIndex = uint8_t(index); }
+
+ inline bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; }
+ inline uint32_t homeRegId() const noexcept { return _homeRegId; }
+ inline void setHomeRegId(uint32_t physId) noexcept { _homeRegId = uint8_t(physId); }
+
+ inline bool hasHintRegId() const noexcept { return _hintRegId != BaseReg::kIdBad; }
+ inline uint32_t hintRegId() const noexcept { return _hintRegId; }
+ inline void setHintRegId(uint32_t physId) noexcept { _hintRegId = uint8_t(physId); }
+
+ inline uint32_t allocatedMask() const noexcept { return _allocatedMask; }
+ inline void addAllocatedMask(uint32_t mask) noexcept { _allocatedMask |= mask; }
+
+ inline uint32_t clobberSurvivalMask() const noexcept { return _clobberSurvivalMask; }
+ inline void addClobberSurvivalMask(uint32_t mask) noexcept { _clobberSurvivalMask |= mask; }
+
+ inline uint64_t regByteMask() const noexcept { return _regByteMask; }
+ inline void setRegByteMask(uint64_t mask) noexcept { _regByteMask = mask; }
+
+ //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RADEFS_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/ralocal.cpp b/3rdparty/asmjit/src/asmjit/core/ralocal.cpp
new file mode 100644
index 00000000000..98d7d8fd568
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/ralocal.cpp
@@ -0,0 +1,1041 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/ralocal_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Utilities]
+// ============================================================================
+
+static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
+ for (size_t i = 0; i < count; i++)
+ if (tiedRegs[i].workId() == workId)
+ return &tiedRegs[i];
+ return nullptr;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Init / Reset]
+// ============================================================================
+
+Error RALocalAllocator::init() noexcept {
+ PhysToWorkMap* physToWorkMap;
+ WorkToPhysMap* workToPhysMap;
+
+ physToWorkMap = _pass->newPhysToWorkMap();
+ workToPhysMap = _pass->newWorkToPhysMap();
+ if (!physToWorkMap || !workToPhysMap)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _curAssignment.initLayout(_pass->_physRegCount, _pass->workRegs());
+ _curAssignment.initMaps(physToWorkMap, workToPhysMap);
+
+ physToWorkMap = _pass->newPhysToWorkMap();
+ workToPhysMap = _pass->newWorkToPhysMap();
+ if (!physToWorkMap || !workToPhysMap)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _tmpAssignment.initLayout(_pass->_physRegCount, _pass->workRegs());
+ _tmpAssignment.initMaps(physToWorkMap, workToPhysMap);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Assignment]
+// ============================================================================
+
+Error RALocalAllocator::makeInitialAssignment() noexcept {
+ FuncNode* func = _pass->func();
+ RABlock* entry = _pass->entryBlock();
+
+ ZoneBitVector& liveIn = entry->liveIn();
+ uint32_t argCount = func->argCount();
+ uint32_t numIter = 1;
+
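+ // Two-pass scheme: pass 0 only honors an explicit home register id; if any
+ // live argument remains unassigned, `numIter` is bumped to 2 and pass 1
+ // either picks any still-free register or falls back to a stack slot.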
+ for (uint32_t iter = 0; iter < numIter; iter++) {
+ for (uint32_t i = 0; i < argCount; i++) {
+ // Unassigned argument.
+ VirtReg* virtReg = func->arg(i);
+ if (!virtReg) continue;
+
+ // Unreferenced argument.
+ RAWorkReg* workReg = virtReg->workReg();
+ if (!workReg) continue;
+
+ // Overwritten argument.
+ uint32_t workId = workReg->workId();
+ if (!liveIn.bitAt(workId))
+ continue;
+
+ uint32_t group = workReg->group();
+ if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
+ continue;
+
+ uint32_t allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
+ if (iter == 0) {
+ // First iteration: Try to allocate to home RegId.
+ if (workReg->hasHomeRegId()) {
+ uint32_t physId = workReg->homeRegId();
+ if (Support::bitTest(allocableRegs, physId)) {
+ _curAssignment.assign(group, workId, physId, true);
+ _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
+ continue;
+ }
+ }
+
+ numIter = 2;
+ }
+ else {
+ // Second iteration: Pick any other register if there is an unassigned one, or assign to stack.
+ if (allocableRegs) {
+ uint32_t physId = Support::ctz(allocableRegs);
+ _curAssignment.assign(group, workId, physId, true);
+ _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
+ }
+ else {
+ // This register will definitely need a stack slot, so create it now and also
+ // assign `argIndex` to it. We will patch `_argsAssignment` later, after
+ // RAStackAllocator finishes.
+ RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
+ if (ASMJIT_UNLIKELY(!slot))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // This means STACK_ARG may be moved to STACK.
+ workReg->addFlags(RAWorkReg::kFlagStackArgToStack);
+ _pass->_numStackArgsToStackSlots++;
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::replaceAssignment(
+ const PhysToWorkMap* physToWorkMap,
+ const WorkToPhysMap* workToPhysMap) noexcept {
+
+ _curAssignment.copyFrom(physToWorkMap, workToPhysMap);
+ return kErrorOk;
+}
+
+Error RALocalAllocator::switchToAssignment(
+ PhysToWorkMap* dstPhysToWorkMap,
+ WorkToPhysMap* dstWorkToPhysMap,
+ const ZoneBitVector& liveIn,
+ bool dstReadOnly,
+ bool tryMode) noexcept {
+
+ RAAssignment dst;
+ RAAssignment& cur = _curAssignment;
+
+ dst.initLayout(_pass->_physRegCount, _pass->workRegs());
+ dst.initMaps(dstPhysToWorkMap, dstWorkToPhysMap);
+
+ if (tryMode)
+ return kErrorOk;
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ // ------------------------------------------------------------------------
+ // STEP 1:
+ // - KILL all registers that are not live at `dst`,
+ // - SPILL all registers that are not assigned at `dst`.
+ // ------------------------------------------------------------------------
+
+ if (!tryMode) {
+ Support::BitWordIterator<uint32_t> it(cur.assigned(group));
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ uint32_t workId = cur.physToWorkId(group, physId);
+
+ // Must be true as we iterate over assigned registers.
+ ASMJIT_ASSERT(workId != RAAssignment::kWorkNone);
+
+ // KILL if it's not live on entry.
+ if (!liveIn.bitAt(workId)) {
+ onKillReg(group, workId, physId);
+ continue;
+ }
+
+ // SPILL if it's not assigned on entry.
+ uint32_t altId = dst.workToPhysId(group, workId);
+ if (altId == RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 2:
+ // - MOVE and SWAP registers from their current assignments into their
+ // DST assignments.
+ // - Build `willLoadRegs` mask of registers scheduled for `onLoadReg()`.
+ // ------------------------------------------------------------------------
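+
+ // For illustration, assume a two-register cycle (hypothetical ids):
+ //   CUR: phys0=W1, phys1=W0       DST: phys0=W0, phys1=W1
+ // If the group supports swapping, a single `onSwapReg()` resolves both
+ // registers at once; otherwise one of them is moved to a free temporary
+ // (or spilled) so the other can reach its DST assignment in a later round.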
+
+ // Current run-id (1 means more aggressive decisions).
+ int32_t runId = -1;
+ // Remaining registers scheduled for `onLoadReg()`.
+ uint32_t willLoadRegs = 0;
+ // Remaining registers to be allocated in this loop.
+ uint32_t affectedRegs = dst.assigned(group);
+
+ while (affectedRegs) {
+ if (++runId == 2) {
+ if (!tryMode)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // Stop in `tryMode` if we haven't done anything in the past two rounds.
+ break;
+ }
+
+ Support::BitWordIterator<uint32_t> it(affectedRegs);
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ uint32_t physMask = Support::bitMask(physId);
+
+ uint32_t curWorkId = cur.physToWorkId(group, physId);
+ uint32_t dstWorkId = dst.physToWorkId(group, physId);
+
+ // The register must have assigned `dstWorkId` as we only iterate over assigned regs.
+ ASMJIT_ASSERT(dstWorkId != RAAssignment::kWorkNone);
+
+ if (curWorkId != RAAssignment::kWorkNone) {
+ // Both assigned.
+ if (curWorkId != dstWorkId) {
+ // Wait a bit if this is the first run; we may avoid this move if `curWorkId` moves out on its own.
+ if (runId <= 0)
+ continue;
+
+ uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
+ if (altPhysId == RAAssignment::kPhysNone)
+ continue;
+
+ // Reset as we will do some changes to the current assignment.
+ runId = -1;
+
+ if (_archTraits.hasSwap(group)) {
+ ASMJIT_PROPAGATE(onSwapReg(group, curWorkId, physId, dstWorkId, altPhysId));
+ }
+ else {
+ // KILL the reg if it's not dirty (nothing to save), otherwise try to MOVE, or SPILL as a last resort.
+ if (!cur.isPhysDirty(group, physId)) {
+ ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId));
+ }
+ else {
+ uint32_t allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);
+
+ // If possible don't conflict with assigned regs at DST.
+ if (allocableRegs & ~dst.assigned(group))
+ allocableRegs &= ~dst.assigned(group);
+
+ if (allocableRegs) {
+ // MOVE is possible, thus preferred.
+ uint32_t tmpPhysId = Support::ctz(allocableRegs);
+
+ ASMJIT_PROPAGATE(onMoveReg(group, curWorkId, tmpPhysId, physId));
+ _pass->_clobberedRegs[group] |= Support::bitMask(tmpPhysId);
+ }
+ else {
+ // MOVE is impossible, must SPILL.
+ ASMJIT_PROPAGATE(onSpillReg(group, curWorkId, physId));
+ }
+ }
+
+ goto Cleared;
+ }
+ }
+ }
+ else {
+Cleared:
+ // DST assigned, CUR unassigned.
+ uint32_t altPhysId = cur.workToPhysId(group, dstWorkId);
+ if (altPhysId == RAAssignment::kPhysNone) {
+ if (liveIn.bitAt(dstWorkId))
+ willLoadRegs |= physMask; // Scheduled for `onLoadReg()`.
+ affectedRegs &= ~physMask; // Unaffected from now.
+ continue;
+ }
+ ASMJIT_PROPAGATE(onMoveReg(group, dstWorkId, physId, altPhysId));
+ }
+
+ // Both DST and CUR assigned to the same reg or CUR just moved to DST.
+ if ((dst.dirty(group) & physMask) != (cur.dirty(group) & physMask)) {
+ if ((dst.dirty(group) & physMask) == 0) {
+ // CUR dirty, DST not dirty (the assert is just to visualize the condition).
+ ASMJIT_ASSERT(!dst.isPhysDirty(group, physId) && cur.isPhysDirty(group, physId));
+
+ // If `dstReadOnly` is true it means that the block was already
+ // processed and we cannot change from CLEAN to DIRTY. In that case
+ // the register has to be saved as it cannot enter the block DIRTY.
+ if (dstReadOnly)
+ ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId));
+ else
+ dst.makeDirty(group, dstWorkId, physId);
+ }
+ else {
+ // DST dirty, CUR not dirty (the assert is just to visualize the condition).
+ ASMJIT_ASSERT(dst.isPhysDirty(group, physId) && !cur.isPhysDirty(group, physId));
+
+ cur.makeDirty(group, dstWorkId, physId);
+ }
+ }
+
+ // Must match now...
+ ASMJIT_ASSERT(dst.physToWorkId(group, physId) == cur.physToWorkId(group, physId));
+ ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));
+
+ runId = -1;
+ affectedRegs &= ~physMask;
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 3:
+ // - Load registers specified by `willLoadRegs`.
+ // ------------------------------------------------------------------------
+
+ {
+ Support::BitWordIterator<uint32_t> it(willLoadRegs);
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+
+ if (!cur.isPhysAssigned(group, physId)) {
+ uint32_t workId = dst.physToWorkId(group, physId);
+
+ // The algorithm is broken if it tries to load a register that is not in LIVE-IN.
+ ASMJIT_ASSERT(liveIn.bitAt(workId) == true);
+
+ ASMJIT_PROPAGATE(onLoadReg(group, workId, physId));
+ if (dst.isPhysDirty(group, physId))
+ cur.makeDirty(group, workId, physId);
+ ASMJIT_ASSERT(dst.isPhysDirty(group, physId) == cur.isPhysDirty(group, physId));
+ }
+ else {
+ // Not possible otherwise.
+ ASMJIT_ASSERT(tryMode == true);
+ }
+ }
+ }
+ }
+
+ if (!tryMode) {
+ // Here is code that dumps the conflicting part if something fails here:
+ // if (!dst.equals(cur)) {
+ // uint32_t physTotal = dst._layout.physTotal;
+ // uint32_t workCount = dst._layout.workCount;
+ //
+ // for (uint32_t physId = 0; physId < physTotal; physId++) {
+ // uint32_t dstWorkId = dst._physToWorkMap->workIds[physId];
+ // uint32_t curWorkId = cur._physToWorkMap->workIds[physId];
+ // if (dstWorkId != curWorkId)
+ // fprintf(stderr, "[PhysIdWork] PhysId=%u WorkId[DST(%u) != CUR(%u)]\n", physId, dstWorkId, curWorkId);
+ // }
+ //
+ // for (uint32_t workId = 0; workId < workCount; workId++) {
+ // uint32_t dstPhysId = dst._workToPhysMap->physIds[workId];
+ // uint32_t curPhysId = cur._workToPhysMap->physIds[workId];
+ // if (dstPhysId != curPhysId)
+ // fprintf(stderr, "[WorkToPhys] WorkId=%u PhysId[DST(%u) != CUR(%u)]\n", workId, dstPhysId, curPhysId);
+ // }
+ // }
+ ASMJIT_ASSERT(dst.equals(cur));
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::spillGpScratchRegsBeforeEntry(uint32_t scratchRegs) noexcept {
+ uint32_t group = BaseReg::kGroupGp;
+ Support::BitWordIterator<uint32_t> it(scratchRegs);
+
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ if (_curAssignment.isPhysAssigned(group, physId)) {
+ uint32_t workId = _curAssignment.physToWorkId(group, physId);
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Allocation]
+// ============================================================================
+
+Error RALocalAllocator::allocInst(InstNode* node) noexcept {
+ RAInst* raInst = node->passData<RAInst>();
+
+ RATiedReg* outTiedRegs[Globals::kMaxPhysRegs];
+ RATiedReg* dupTiedRegs[Globals::kMaxPhysRegs];
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ _node = node;
+ _raInst = raInst;
+ _tiedTotal = raInst->_tiedTotal;
+ _tiedCount = raInst->_tiedCount;
+
+ // Whether we already replaced register operand with memory operand.
+ bool rmAllocated = false;
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ uint32_t i, count = this->tiedCount(group);
+ RATiedReg* tiedRegs = this->tiedRegs(group);
+
+ uint32_t willUse = _raInst->_usedRegs[group];
+ uint32_t willOut = _raInst->_clobberedRegs[group];
+ uint32_t willFree = 0;
+ uint32_t usePending = count;
+
+ uint32_t outTiedCount = 0;
+ uint32_t dupTiedCount = 0;
+
+ // ------------------------------------------------------------------------
+ // STEP 1:
+ //
+ // Calculate `willUse` and `willFree` masks based on tied registers we have.
+ //
+ // We don't do any assignment decisions at this stage as we just need to
+ // collect some information first. Then, after we populate all masks needed
+ // we can finally make some decisions in the second loop. The main reason
+ // for this is that we really need `willFree` to make assignment decisions
+ // for `willUse`, because if we mark some registers that will be freed, we
+ // can consider them in decision making afterwards.
+ // ------------------------------------------------------------------------
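+
+ // Hypothetical example: if this instruction needs W5 in a fixed USE
+ // register that currently holds W9, the fixed register's bit enters both
+ // `willUse` and `willFree`, telling STEP 2 that W9 must be moved or
+ // spilled before W5 can take its place.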
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* tiedReg = &tiedRegs[i];
+
+ // Add OUT and KILL to `outTiedRegs` for later CLOBBERing and/or OUT assignment.
+ if (tiedReg->isOutOrKill())
+ outTiedRegs[outTiedCount++] = tiedReg;
+
+ if (tiedReg->isDuplicate())
+ dupTiedRegs[dupTiedCount++] = tiedReg;
+
+ if (!tiedReg->isUse()) {
+ tiedReg->markUseDone();
+ usePending--;
+ continue;
+ }
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ if (tiedReg->hasUseId()) {
+ // If the register has `useId` it means it can only be allocated in that register.
+ uint32_t useMask = Support::bitMask(tiedReg->useId());
+
+ // RAInstBuilder must have collected `usedRegs` on-the-fly.
+ ASMJIT_ASSERT((willUse & useMask) != 0);
+
+ if (assignedId == tiedReg->useId()) {
+ // If the register is already allocated in this one, mark it done and continue.
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, assignedId);
+ usePending--;
+ willUse |= useMask;
+ }
+ else {
+ willFree |= useMask & _curAssignment.assigned(group);
+ }
+ }
+ else {
+ // Check if the register must be moved to `allocableRegs`.
+ uint32_t allocableRegs = tiedReg->allocableRegs();
+ if (assignedId != RAAssignment::kPhysNone) {
+ uint32_t assignedMask = Support::bitMask(assignedId);
+ if ((allocableRegs & ~willUse) & assignedMask) {
+ tiedReg->setUseId(assignedId);
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, assignedId);
+ usePending--;
+ willUse |= assignedMask;
+ }
+ else {
+ willFree |= assignedMask;
+ }
+ }
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 2:
+ //
+ // Do some decision making to find the best candidates of registers that
+ // need to be assigned, moved, and/or spilled. Only USE registers are
+ // considered here, OUT will be decided later after all CLOBBERed and OUT
+ // registers are unassigned.
+ // ------------------------------------------------------------------------
+
+ if (usePending) {
+ // TODO: Not sure `liveRegs` should be used, maybe willUse and willFree would be enough and much more clear.
+
+ // All registers that are currently alive without registers that will be freed.
+ uint32_t liveRegs = _curAssignment.assigned(group) & ~willFree;
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* tiedReg = &tiedRegs[i];
+ if (tiedReg->isUseDone()) continue;
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ // REG/MEM: Patch register operand to memory operand if not allocated.
+ if (!rmAllocated && tiedReg->hasUseRM()) {
+ if (assignedId == RAAssignment::kPhysNone && Support::isPowerOf2(tiedReg->useRewriteMask())) {
+ RAWorkReg* workReg = workRegById(tiedReg->workId());
+ uint32_t opIndex = Support::ctz(tiedReg->useRewriteMask()) / uint32_t(sizeof(Operand) / sizeof(uint32_t));
+ uint32_t rmSize = tiedReg->rmSize();
+
+ if (rmSize <= workReg->virtReg()->virtSize()) {
+ Operand& op = node->operands()[opIndex];
+ op = _pass->workRegAsMem(workReg);
+ op.as<BaseMem>().setSize(rmSize);
+ tiedReg->_useRewriteMask = 0;
+
+ tiedReg->markUseDone();
+ usePending--;
+
+ rmAllocated = true;
+ continue;
+ }
+ }
+ }
+
+ if (!tiedReg->hasUseId()) {
+ uint32_t allocableRegs = tiedReg->allocableRegs() & ~(willFree | willUse);
+
+ // DECIDE where to assign the USE register.
+ uint32_t useId = decideOnAssignment(group, workId, assignedId, allocableRegs);
+ uint32_t useMask = Support::bitMask(useId);
+
+ willUse |= useMask;
+ willFree |= useMask & liveRegs;
+ tiedReg->setUseId(useId);
+
+ if (assignedId != RAAssignment::kPhysNone) {
+ uint32_t assignedMask = Support::bitMask(assignedId);
+
+ willFree |= assignedMask;
+ liveRegs &= ~assignedMask;
+
+ // OPTIMIZATION: Assign the USE register here if it's possible.
+ if (!(liveRegs & useMask)) {
+ ASMJIT_PROPAGATE(onMoveReg(group, workId, useId, assignedId));
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, useId);
+ usePending--;
+ }
+ }
+ else {
+ // OPTIMIZATION: Assign the USE register here if it's possible.
+ if (!(liveRegs & useMask)) {
+ ASMJIT_PROPAGATE(onLoadReg(group, workId, useId));
+ tiedReg->markUseDone();
+ if (tiedReg->isWrite())
+ _curAssignment.makeDirty(group, workId, useId);
+ usePending--;
+ }
+ }
+
+ liveRegs |= useMask;
+ }
+ }
+ }
+
+ // Initially all used regs will be marked clobbered.
+ uint32_t clobberedByInst = willUse | willOut;
+
+ // ------------------------------------------------------------------------
+ // STEP 3:
+ //
+ // Free all registers that we marked as `willFree`. Only registers that are not
+ // USEd by the instruction are considered as we don't want to free regs we need.
+ // ------------------------------------------------------------------------
+
+ if (willFree) {
+ uint32_t allocableRegs = _availableRegs[group] & ~(_curAssignment.assigned(group) | willFree | willUse | willOut);
+ Support::BitWordIterator<uint32_t> it(willFree);
+
+ do {
+ uint32_t assignedId = it.next();
+ if (_curAssignment.isPhysAssigned(group, assignedId)) {
+ uint32_t workId = _curAssignment.physToWorkId(group, assignedId);
+
+ // DECIDE whether to MOVE or SPILL.
+ if (allocableRegs) {
+ uint32_t reassignedId = decideOnUnassignment(group, workId, assignedId, allocableRegs);
+ if (reassignedId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onMoveReg(group, workId, reassignedId, assignedId));
+ allocableRegs ^= Support::bitMask(reassignedId);
+ continue;
+ }
+ }
+
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, assignedId));
+ }
+ } while (it.hasNext());
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 4:
+ //
+ // ALLOCATE / SHUFFLE all registers that we marked as `willUse` and weren't
+ // allocated yet. This is a bit complicated as the allocation is iterative.
+ // In some cases we have to wait before allocating a particular physical
+ // register as it's still occupied by some other one, which we need to move
+ // before we can use it. In this case we skip it and allocate some other
+ // register instead (making the target free for another iteration).
+ //
+ // NOTE: Iterations are mostly important for complicated allocations like
+ // function calls, where there can be up to N registers used at once. Asm
+ // instructions won't run the loop more than once in 99.9% of cases as they
+ // use 2..3 registers on average.
+ // ------------------------------------------------------------------------
+
+ if (usePending) {
+ bool mustSwap = false;
+ do {
+ uint32_t oldPending = usePending;
+
+ for (i = 0; i < count; i++) {
+ RATiedReg* thisTiedReg = &tiedRegs[i];
+ if (thisTiedReg->isUseDone()) continue;
+
+ uint32_t thisWorkId = thisTiedReg->workId();
+ uint32_t thisPhysId = _curAssignment.workToPhysId(group, thisWorkId);
+
+ // This would be a bug, a fatal one!
+ uint32_t targetPhysId = thisTiedReg->useId();
+ ASMJIT_ASSERT(targetPhysId != thisPhysId);
+
+ uint32_t targetWorkId = _curAssignment.physToWorkId(group, targetPhysId);
+ if (targetWorkId != RAAssignment::kWorkNone) {
+ RAWorkReg* targetWorkReg = workRegById(targetWorkId);
+
+ // Swapping two registers can solve two allocation tasks by emitting
+ // just a single instruction. However, swap is only available on a few
+ // architectures and it's definitely not available for every register
+ // group. Calling `onSwapReg()` without checking these would be fatal.
+ if (_archTraits.hasSwap(group) && thisPhysId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onSwapReg(group, thisWorkId, thisPhysId, targetWorkId, targetPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+
+ // Double-hit.
+ RATiedReg* targetTiedReg = RALocal_findTiedRegByWorkId(tiedRegs, count, targetWorkReg->workId());
+ if (targetTiedReg && targetTiedReg->useId() == thisPhysId) {
+ targetTiedReg->markUseDone();
+ if (targetTiedReg->isWrite())
+ _curAssignment.makeDirty(group, targetWorkId, thisPhysId);
+ usePending--;
+ }
+ continue;
+ }
+
+ if (!mustSwap)
+ continue;
+
+ // We only get here if the previous iteration did nothing. This is
+ // essentially a SWAP operation without a dedicated instruction for
+ // that purpose (vector registers, etc). The simplest way to handle
+ // such a case is to SPILL the target register.
+ ASMJIT_PROPAGATE(onSpillReg(group, targetWorkId, targetPhysId));
+ }
+
+ if (thisPhysId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onMoveReg(group, thisWorkId, targetPhysId, thisPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+ }
+ else {
+ ASMJIT_PROPAGATE(onLoadReg(group, thisWorkId, targetPhysId));
+
+ thisTiedReg->markUseDone();
+ if (thisTiedReg->isWrite())
+ _curAssignment.makeDirty(group, thisWorkId, targetPhysId);
+ usePending--;
+ }
+ }
+
+ mustSwap = (oldPending == usePending);
+ } while (usePending);
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 5:
+ //
+ // KILL registers marked as KILL/OUT.
+ // ------------------------------------------------------------------------
+
+ uint32_t outPending = outTiedCount;
+ if (outTiedCount) {
+ for (i = 0; i < outTiedCount; i++) {
+ RATiedReg* tiedReg = outTiedRegs[i];
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t physId = _curAssignment.workToPhysId(group, workId);
+
+ // Must check if it's allocated as KILL can be related to OUT (like KILL
+ // immediately after OUT, which could mean the register is not assigned).
+ if (physId != RAAssignment::kPhysNone) {
+ ASMJIT_PROPAGATE(onKillReg(group, workId, physId));
+ willOut &= ~Support::bitMask(physId);
+ }
+
+ // We still maintain the number of pending registers for OUT assignment.
+ // So, if this is only a KILL, not an OUT, we can safely decrement it.
+ outPending -= !tiedReg->isOut();
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 6:
+ //
+ // SPILL registers that will be CLOBBERed. Since OUT and KILL were
+ // already processed this is used mostly to handle function CALLs.
+ // ------------------------------------------------------------------------
+
+ if (willOut) {
+ Support::BitWordIterator<uint32_t> it(willOut);
+ do {
+ uint32_t physId = it.next();
+ uint32_t workId = _curAssignment.physToWorkId(group, physId);
+
+ if (workId == RAAssignment::kWorkNone)
+ continue;
+
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, physId));
+ } while (it.hasNext());
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 7:
+ //
+ // Duplication.
+ // ------------------------------------------------------------------------
+
+ for (i = 0; i < dupTiedCount; i++) {
+ RATiedReg* tiedReg = dupTiedRegs[i];
+ uint32_t workId = tiedReg->workId();
+ uint32_t srcId = tiedReg->useId();
+
+ Support::BitWordIterator<uint32_t> it(tiedReg->_allocableRegs);
+ while (it.hasNext()) {
+ uint32_t dstId = it.next();
+ if (dstId == srcId)
+ continue;
+ _pass->onEmitMove(workId, dstId, srcId);
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // STEP 8:
+ //
+ // Assign OUT registers.
+ // ------------------------------------------------------------------------
+
+ if (outPending) {
+ // Live registers. We need a separate variable (outside of `_curAssignment`)
+ // to hold these because of KILLed registers - if we KILL a register here it
+ // is removed from `_curAssignment`, but we still cannot assign to it here.
+ uint32_t liveRegs = _curAssignment.assigned(group);
+
+ // Must avoid these as they have already been OUTed (added during the loop).
+ uint32_t outRegs = 0;
+
+ // Must avoid as they collide with already allocated ones.
+ uint32_t avoidRegs = willUse & ~clobberedByInst;
+
+ for (i = 0; i < outTiedCount; i++) {
+ RATiedReg* tiedReg = outTiedRegs[i];
+ if (!tiedReg->isOut()) continue;
+
+ uint32_t workId = tiedReg->workId();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+
+ if (assignedId != RAAssignment::kPhysNone)
+ ASMJIT_PROPAGATE(onKillReg(group, workId, assignedId));
+
+ uint32_t physId = tiedReg->outId();
+ if (physId == RAAssignment::kPhysNone) {
+ uint32_t allocableRegs = _availableRegs[group] & ~(outRegs | avoidRegs);
+
+ if (!(allocableRegs & ~liveRegs)) {
+ // There are no more registers, decide which one to spill.
+ uint32_t spillWorkId;
+ physId = decideOnSpillFor(group, workId, allocableRegs & liveRegs, &spillWorkId);
+ ASMJIT_PROPAGATE(onSpillReg(group, spillWorkId, physId));
+ }
+ else {
+ physId = decideOnAssignment(group, workId, RAAssignment::kPhysNone, allocableRegs & ~liveRegs);
+ }
+ }
+
+ // OUTs are CLOBBERed thus cannot be ASSIGNed right now.
+ ASMJIT_ASSERT(!_curAssignment.isPhysAssigned(group, physId));
+
+ if (!tiedReg->isKill())
+ ASMJIT_PROPAGATE(onAssignReg(group, workId, physId, true));
+
+ tiedReg->setOutId(physId);
+ tiedReg->markOutDone();
+
+ outRegs |= Support::bitMask(physId);
+ liveRegs &= ~Support::bitMask(physId);
+ outPending--;
+ }
+
+ clobberedByInst |= outRegs;
+ ASMJIT_ASSERT(outPending == 0);
+ }
+
+ _clobberedRegs[group] |= clobberedByInst;
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::spillAfterAllocation(InstNode* node) noexcept {
+ // This is an experimental feature that spills registers that have no
+ // home-id and whose last use is in this basic block. This prevents saving
+ // these regs in other basic blocks and then restoring them (mostly relevant
+ // for loops).
+ RAInst* raInst = node->passData<RAInst>();
+ uint32_t count = raInst->tiedCount();
+
+ for (uint32_t i = 0; i < count; i++) {
+ RATiedReg* tiedReg = raInst->tiedAt(i);
+ if (tiedReg->isLast()) {
+ uint32_t workId = tiedReg->workId();
+ RAWorkReg* workReg = workRegById(workId);
+ if (!workReg->hasHomeRegId()) {
+ uint32_t group = workReg->group();
+ uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
+ if (assignedId != RAAssignment::kPhysNone) {
+ _cc->_setCursor(node);
+ ASMJIT_PROPAGATE(onSpillReg(group, workId, assignedId));
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept {
+ // TODO: This should be used to make the branch allocation better.
+ DebugUtils::unused(cont);
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ // Use TryMode of `switchToAssignment()` if possible.
+ if (target->hasEntryAssignment()) {
+ ASMJIT_PROPAGATE(switchToAssignment(
+ target->entryPhysToWorkMap(),
+ target->entryWorkToPhysMap(),
+ target->liveIn(),
+ target->isAllocated(),
+ true));
+ }
+
+ ASMJIT_PROPAGATE(allocInst(node));
+ ASMJIT_PROPAGATE(spillRegsBeforeEntry(target));
+
+ if (target->hasEntryAssignment()) {
+ BaseNode* injectionPoint = _pass->extraBlock()->prev();
+ BaseNode* prevCursor = _cc->setCursor(injectionPoint);
+
+ _tmpAssignment.copyFrom(_curAssignment);
+ ASMJIT_PROPAGATE(switchToAssignment(
+ target->entryPhysToWorkMap(),
+ target->entryWorkToPhysMap(),
+ target->liveIn(),
+ target->isAllocated(),
+ false));
+
+ BaseNode* curCursor = _cc->cursor();
+ if (curCursor != injectionPoint) {
+ // Additional instructions emitted to switch from the current state to
+ // the `target` state. This means that we have to move these instructions
+ // into an independent code block and patch the jump location.
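+ //
+ // A sketch of the resulting transformation (hypothetical labels):
+ //   jcc L_target        becomes        jcc L_trampoline
+ //                                      ...
+ //                                    L_trampoline:
+ //                                      <assignment-switching moves/loads>
+ //                                      jmp L_target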
+ Operand& targetOp(node->opType(node->opCount() - 1));
+ if (ASMJIT_UNLIKELY(!targetOp.isLabel()))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ Label trampoline = _cc->newLabel();
+ Label savedTarget = targetOp.as<Label>();
+
+ // Patch `target` to point to the `trampoline` we just created.
+ targetOp = trampoline;
+
+ // Clear a possible SHORT form as we have no clue now if the SHORT form would
+ // be encodable after patching the target to `trampoline` (X86 specific).
+ node->clearInstOptions(BaseInst::kOptionShortForm);
+
+ // Finalize the switch assignment sequence.
+ ASMJIT_PROPAGATE(_pass->onEmitJump(savedTarget));
+ _cc->_setCursor(injectionPoint);
+ _cc->bind(trampoline);
+ }
+
+ _cc->_setCursor(prevCursor);
+ _curAssignment.swap(_tmpAssignment);
+ }
+ else {
+ ASMJIT_PROPAGATE(_pass->setBlockEntryAssignment(target, block(), _curAssignment));
+ }
+
+ return kErrorOk;
+}
+
+Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept {
+ if (targets.empty())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (targets.size() == 1)
+ return allocBranch(node, targets[0], cont);
+
+ // The cursor must point to the previous instruction for a possible instruction insertion.
+ _cc->_setCursor(node->prev());
+
+ // All `targets` should have the same sharedAssignmentId, so we just read the first.
+ RABlock* anyTarget = targets[0];
+ if (!anyTarget->hasSharedAssignmentId())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ RASharedAssignment& sharedAssignment = _pass->_sharedAssignments[anyTarget->sharedAssignmentId()];
+
+ ASMJIT_PROPAGATE(allocInst(node));
+
+ if (!sharedAssignment.empty()) {
+ ASMJIT_PROPAGATE(switchToAssignment(
+ sharedAssignment.physToWorkMap(),
+ sharedAssignment.workToPhysMap(),
+ sharedAssignment.liveIn(),
+ true, // Read-only.
+ false // Try-mode.
+ ));
+ }
+
+ ASMJIT_PROPAGATE(spillRegsBeforeEntry(anyTarget));
+
+ if (sharedAssignment.empty()) {
+ ASMJIT_PROPAGATE(_pass->setBlockEntryAssignment(anyTarget, block(), _curAssignment));
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RALocalAllocator - Decision Making]
+// ============================================================================
+
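+ // Worked example for `decideOnAssignment()` (hypothetical masks): with
+ // allocableRegs=0b1100 and a previously allocated mask of 0b0100, the
+ // intersection narrows the choice to 0b0100, so physId 2 is returned.
+ // Without any usable hint, the lowest set bit of `allocableRegs` wins.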
+uint32_t RALocalAllocator::decideOnAssignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
+ DebugUtils::unused(group, physId);
+ ASMJIT_ASSERT(allocableRegs != 0);
+
+ RAWorkReg* workReg = workRegById(workId);
+
+ // HIGHEST PRIORITY: Home register id.
+ if (workReg->hasHomeRegId()) {
+ uint32_t homeId = workReg->homeRegId();
+ if (Support::bitTest(allocableRegs, homeId))
+ return homeId;
+ }
+
+ // HIGH PRIORITY: Register IDs used upon block entries.
+ uint32_t previouslyAssignedRegs = workReg->allocatedMask();
+ if (allocableRegs & previouslyAssignedRegs)
+ allocableRegs &= previouslyAssignedRegs;
+
+ if (Support::isPowerOf2(allocableRegs))
+ return Support::ctz(allocableRegs);
+
+ // TODO: This is not finished.
+ return Support::ctz(allocableRegs);
+}
+
+uint32_t RALocalAllocator::decideOnUnassignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
+ ASMJIT_ASSERT(allocableRegs != 0);
+
+ // TODO:
+ DebugUtils::unused(allocableRegs, group, workId, physId);
+
+ // if (!_curAssignment.isPhysDirty(group, physId)) {
+ // }
+
+ // Decided to SPILL.
+ return RAAssignment::kPhysNone;
+}
+
+uint32_t RALocalAllocator::decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept {
+ // May be used in the future to decide which register would be best to spill so `workId` can be assigned.
+ DebugUtils::unused(workId);
+ ASMJIT_ASSERT(spillableRegs != 0);
+
+ Support::BitWordIterator<uint32_t> it(spillableRegs);
+ uint32_t bestPhysId = it.next();
+ uint32_t bestWorkId = _curAssignment.physToWorkId(group, bestPhysId);
+
+ // Avoid calculating the cost model if there is only one spillable register.
+ if (it.hasNext()) {
+ uint32_t bestCost = calculateSpillCost(group, bestWorkId, bestPhysId);
+ do {
+ uint32_t localPhysId = it.next();
+ uint32_t localWorkId = _curAssignment.physToWorkId(group, localPhysId);
+ uint32_t localCost = calculateSpillCost(group, localWorkId, localPhysId);
+
+ if (localCost < bestCost) {
+ bestCost = localCost;
+ bestPhysId = localPhysId;
+ bestWorkId = localWorkId;
+ }
+ } while (it.hasNext());
+ }
+
+ *spillWorkId = bestWorkId;
+ return bestPhysId;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/core/ralocal_p.h b/3rdparty/asmjit/src/asmjit/core/ralocal_p.h
new file mode 100644
index 00000000000..0af595b3cd5
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/ralocal_p.h
@@ -0,0 +1,281 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
+#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/raassignment_p.h"
+#include "../core/radefs_p.h"
+#include "../core/rapass_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RALocalAllocator]
+// ============================================================================
+
+//! Local register allocator.
+class RALocalAllocator {
+public:
+ ASMJIT_NONCOPYABLE(RALocalAllocator)
+
+ typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+ typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+ //! Link to `RAPass`.
+ RAPass* _pass;
+ //! Link to `BaseCompiler`.
+ BaseCompiler* _cc;
+
+ //! Architecture traits.
+ RAArchTraits _archTraits;
+ //! Registers available to the allocator.
+ RARegMask _availableRegs;
+ //! Registers clobbered by the allocator.
+ RARegMask _clobberedRegs;
+
+ //! Register assignment (current).
+ RAAssignment _curAssignment;
+ //! Register assignment used temporarily during assignment switches.
+ RAAssignment _tmpAssignment;
+
+ //! Link to the current `RABlock`.
+ RABlock* _block;
+ //! Currently processed `InstNode`.
+ InstNode* _node;
+ //! `RAInst` data of the currently processed node.
+ RAInst* _raInst;
+
+ //! Count of all TiedRegs of the current instruction.
+ uint32_t _tiedTotal;
+ //! TiedReg count per register group.
+ RARegCount _tiedCount;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RALocalAllocator(RAPass* pass) noexcept
+ : _pass(pass),
+ _cc(pass->cc()),
+ _archTraits(pass->_archTraits),
+ _availableRegs(pass->_availableRegs),
+ _clobberedRegs(),
+ _curAssignment(),
+ _block(nullptr),
+ _node(nullptr),
+ _raInst(nullptr),
+ _tiedTotal(),
+ _tiedCount() {}
+
+ Error init() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _pass->workRegById(workId); }
+ inline PhysToWorkMap* physToWorkMap() const noexcept { return _curAssignment.physToWorkMap(); }
+ inline WorkToPhysMap* workToPhysMap() const noexcept { return _curAssignment.workToPhysMap(); }
+
+ //! Returns the currently processed block.
+ inline RABlock* block() const noexcept { return _block; }
+ //! Sets the currently processed block.
+ inline void setBlock(RABlock* block) noexcept { _block = block; }
+
+ //! Returns the currently processed `InstNode`.
+ inline InstNode* node() const noexcept { return _node; }
+ //! Returns the currently processed `RAInst`.
+ inline RAInst* raInst() const noexcept { return _raInst; }
+
+ //! Returns all tied regs as `RATiedReg` array.
+ inline RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
+ //! Returns tied registers grouped by the given `group`.
+ inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return _raInst->tiedRegs(group); }
+
+ //! Returns count of all TiedRegs used by the instruction.
+ inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
+ //! Returns count of TiedRegs used by the given register `group`.
+ inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount.get(group); }
+
+ inline bool isGroupUsed(uint32_t group) const noexcept { return _tiedCount[group] != 0; }
+
+ //! \}
+
+ //! \name Assignment
+ //! \{
+
+ Error makeInitialAssignment() noexcept;
+
+ Error replaceAssignment(
+ const PhysToWorkMap* physToWorkMap,
+ const WorkToPhysMap* workToPhysMap) noexcept;
+
+ //! Switches to the given assignment by reassigning all registers and
+ //! emitting code that performs the reassignment. This is always used to
+ //! switch to a previously stored assignment.
+ //!
+ //! If `tryMode` is true then the final assignment doesn't have to be exactly
+ //! the same as specified by `dstPhysToWorkMap` and `dstWorkToPhysMap`. This
+ //! mode is only used before conditional jumps that already have an
+ //! assignment, to generate a code sequence that is always executed
+ //! regardless of the flow.
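+ //!
+ //! A sketch of a typical call (mirroring `allocBranch()`):
+ //!
+ //!   switchToAssignment(target->entryPhysToWorkMap(),
+ //!                      target->entryWorkToPhysMap(),
+ //!                      target->liveIn(),
+ //!                      target->isAllocated(),
+ //!                      false);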
+ Error switchToAssignment(
+ PhysToWorkMap* dstPhysToWorkMap,
+ WorkToPhysMap* dstWorkToPhysMap,
+ const ZoneBitVector& liveIn,
+ bool dstReadOnly,
+ bool tryMode) noexcept;
+
+ inline Error spillRegsBeforeEntry(RABlock* block) noexcept {
+ return spillGpScratchRegsBeforeEntry(block->entryScratchGpRegs());
+ }
+
+ Error spillGpScratchRegsBeforeEntry(uint32_t scratchRegs) noexcept;
+
+ //! \}
+
+ //! \name Allocation
+ //! \{
+
+ Error allocInst(InstNode* node) noexcept;
+ Error spillAfterAllocation(InstNode* node) noexcept;
+
+ Error allocBranch(InstNode* node, RABlock* target, RABlock* cont) noexcept;
+ Error allocJumpTable(InstNode* node, const RABlocks& targets, RABlock* cont) noexcept;
+
+ //! \}
+
+ //! \name Decision Making
+ //! \{
+
+ enum CostModel : uint32_t {
+ kCostOfFrequency = 1048576,
+ kCostOfDirtyFlag = kCostOfFrequency / 4
+ };
+
+ inline uint32_t costByFrequency(float freq) const noexcept {
+ return uint32_t(int32_t(freq * float(kCostOfFrequency)));
+ }
+
+ inline uint32_t calculateSpillCost(uint32_t group, uint32_t workId, uint32_t assignedId) const noexcept {
+ RAWorkReg* workReg = workRegById(workId);
+ uint32_t cost = costByFrequency(workReg->liveStats().freq());
+
+ if (_curAssignment.isPhysDirty(group, assignedId))
+ cost += kCostOfDirtyFlag;
+
+ return cost;
+ }
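+
+ // Worked example for the cost model above: a WorkReg with frequency 0.5
+ // costs 0.5 * 1048576 = 524288; if its physical register is dirty, the
+ // extra save adds kCostOfDirtyFlag (262144), giving 786432. The spill
+ // decision below picks the candidate with the lowest cost.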
+
+ //! Decides on register assignment.
+ uint32_t decideOnAssignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
+
+ //! Decides on whether to MOVE or SPILL the given WorkReg.
+ //!
+ //! The function must return either `RAAssignment::kPhysNone`, which means that
+ //! the WorkReg should be spilled, or a valid physical register ID, which means
+ //! that the register should be moved to that physical register instead.
+ uint32_t decideOnUnassignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
+
+ //! Decides on the best spill candidate given a register mask `spillableRegs`.
+ uint32_t decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept;
+
+ //! \}
+
+ //! \name Emit
+ //! \{
+
+ //! Emits a move between a destination and source register, and fixes the
+ //! register assignment.
+ inline Error onMoveReg(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
+ if (dstPhysId == srcPhysId) return kErrorOk;
+ _curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
+ return _pass->onEmitMove(workId, dstPhysId, srcPhysId);
+ }
+
+ //! Emits a swap between two physical registers and fixes their assignment.
+ //!
+ //! \note The target must support this operation, otherwise this would ASSERT.
+ inline Error onSwapReg(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
+ _curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
+ return _pass->onEmitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
+ }
+
+ //! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
+ //! and makes it assigned and clean.
+ inline Error onLoadReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.assign(group, workId, physId, RAAssignment::kClean);
+ return _pass->onEmitLoad(workId, physId);
+ }
+
+ //! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
+ //! keeps it assigned, and makes it clean.
+ inline Error onSaveReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
+ ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
+
+ _curAssignment.makeClean(group, workId, physId);
+ return _pass->onEmitSave(workId, physId);
+ }
+
+ //! Assigns a register, the content of it is undefined at this point.
+ inline Error onAssignReg(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
+ _curAssignment.assign(group, workId, physId, dirty);
+ return kErrorOk;
+ }
+
+ //! Spills a variable/register, saves the content to the memory-home if modified.
+ inline Error onSpillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ if (_curAssignment.isPhysDirty(group, physId))
+ ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
+ return onKillReg(group, workId, physId);
+ }
+
+ inline Error onDirtyReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.makeDirty(group, workId, physId);
+ return kErrorOk;
+ }
+
+ inline Error onKillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
+ _curAssignment.unassign(group, workId, physId);
+ return kErrorOk;
+ }
+
+ //! \}
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RALOCAL_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/rapass.cpp b/3rdparty/asmjit/src/asmjit/core/rapass.cpp
new file mode 100644
index 00000000000..1174635d75d
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/rapass.cpp
@@ -0,0 +1,1994 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/ralocal_p.h"
+#include "../core/rapass_p.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../core/zonestack.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RABlock - Control Flow]
+// ============================================================================
+
+Error RABlock::appendSuccessor(RABlock* successor) noexcept {
+ RABlock* predecessor = this;
+
+ if (predecessor->_successors.contains(successor))
+ return kErrorOk;
+ ASMJIT_ASSERT(!successor->_predecessors.contains(predecessor));
+
+ ASMJIT_PROPAGATE(successor->_predecessors.willGrow(allocator()));
+ ASMJIT_PROPAGATE(predecessor->_successors.willGrow(allocator()));
+
+ predecessor->_successors.appendUnsafe(successor);
+ successor->_predecessors.appendUnsafe(predecessor);
+
+ return kErrorOk;
+}
+
+Error RABlock::prependSuccessor(RABlock* successor) noexcept {
+ RABlock* predecessor = this;
+
+ if (predecessor->_successors.contains(successor))
+ return kErrorOk;
+ ASMJIT_ASSERT(!successor->_predecessors.contains(predecessor));
+
+ ASMJIT_PROPAGATE(successor->_predecessors.willGrow(allocator()));
+ ASMJIT_PROPAGATE(predecessor->_successors.willGrow(allocator()));
+
+ predecessor->_successors.prependUnsafe(successor);
+ successor->_predecessors.prependUnsafe(predecessor);
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Construction / Destruction]
+// ============================================================================
+
+RAPass::RAPass() noexcept
+ : FuncPass("RAPass"),
+ _allocator(),
+ _logger(nullptr),
+ _debugLogger(nullptr),
+ _loggerFlags(0),
+ _func(nullptr),
+ _stop(nullptr),
+ _extraBlock(nullptr),
+ _blocks(),
+ _exits(),
+ _pov(),
+ _instructionCount(0),
+ _createdBlockCount(0),
+ _sharedAssignments(),
+ _lastTimestamp(0),
+ _archRegsInfo(nullptr),
+ _archTraits(),
+ _physRegIndex(),
+ _physRegCount(),
+ _physRegTotal(0),
+ _scratchRegIndexes{},
+ _availableRegs(),
+ _availableRegCount(),
+ _clobberedRegs(),
+ _globalMaxLiveCount(),
+ _globalLiveSpans {},
+ _temporaryMem(),
+ _sp(),
+ _fp(),
+ _stackAllocator(),
+ _argsAssignment(),
+ _numStackArgsToStackSlots(0),
+ _maxWorkRegNameSize(0) {}
+RAPass::~RAPass() noexcept {}
+
+// ============================================================================
+// [asmjit::RAPass - RunOnFunction]
+// ============================================================================
+
+static void RAPass_reset(RAPass* self, FuncDetail* funcDetail) noexcept {
+ ZoneAllocator* allocator = self->allocator();
+
+ self->_blocks.reset();
+ self->_exits.reset();
+ self->_pov.reset();
+ self->_workRegs.reset();
+ self->_instructionCount = 0;
+ self->_createdBlockCount = 0;
+
+ self->_sharedAssignments.reset();
+ self->_lastTimestamp = 0;
+
+ self->_archRegsInfo = nullptr;
+ self->_archTraits.reset();
+ self->_physRegIndex.reset();
+ self->_physRegCount.reset();
+ self->_physRegTotal = 0;
+
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(self->_scratchRegIndexes); i++)
+ self->_scratchRegIndexes[i] = BaseReg::kIdBad;
+
+ self->_availableRegs.reset();
+ self->_availableRegCount.reset();
+ self->_clobberedRegs.reset();
+
+ self->_workRegs.reset();
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ self->_workRegsOfGroup[group].reset();
+ self->_strategy[group].reset();
+ self->_globalLiveSpans[group] = nullptr;
+ }
+ self->_globalMaxLiveCount.reset();
+ self->_temporaryMem.reset();
+
+ self->_stackAllocator.reset(allocator);
+ self->_argsAssignment.reset(funcDetail);
+ self->_numStackArgsToStackSlots = 0;
+ self->_maxWorkRegNameSize = 0;
+}
+
+static void RAPass_resetVirtRegData(RAPass* self) noexcept {
+ // Zero everything so it cannot be used by accident.
+ for (RAWorkReg* wReg : self->_workRegs) {
+ VirtReg* vReg = wReg->virtReg();
+ vReg->_workReg = nullptr;
+ }
+}
+
+Error RAPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept {
+ _allocator.reset(zone);
+
+#ifndef ASMJIT_NO_LOGGING
+ _logger = logger;
+ _debugLogger = nullptr;
+
+ if (logger) {
+ _loggerFlags = logger->flags();
+ if (_loggerFlags & FormatOptions::kFlagDebugPasses)
+ _debugLogger = logger;
+ }
+#else
+ DebugUtils::unused(logger);
+#endif
+
+ // Initialize all core structures to use `zone` and `func`.
+ BaseNode* end = func->endNode();
+ _func = func;
+ _stop = end->next();
+ _extraBlock = end;
+
+ RAPass_reset(this, &_func->_funcDetail);
+
+ // Initialize architecture-specific members.
+ onInit();
+
+ // Perform all allocation steps required.
+ Error err = onPerformAllSteps();
+
+ // Must be called regardless of the allocation status.
+ onDone();
+
+ // TODO: I don't like this...
+ // Reset possible connections introduced by the register allocator.
+ RAPass_resetVirtRegData(this);
+
+ // Reset all core structures and everything that depends on the passed `Zone`.
+ RAPass_reset(this, nullptr);
+ _allocator.reset(nullptr);
+
+#ifndef ASMJIT_NO_LOGGING
+ _logger = nullptr;
+ _debugLogger = nullptr;
+ _loggerFlags = 0;
+#endif
+
+ _func = nullptr;
+ _stop = nullptr;
+ _extraBlock = nullptr;
+
+ // Reset `Zone` as nothing should persist between `runOnFunction()` calls.
+ zone->reset();
+
+ // We alter the compiler cursor, because it doesn't make sense to reference
+ // it after the compilation - some nodes may disappear and the old cursor
+ // could become invalid anyway.
+ cc()->_setCursor(cc()->lastNode());
+
+ return err;
+}
+
+Error RAPass::onPerformAllSteps() noexcept {
+ ASMJIT_PROPAGATE(buildCFG());
+ ASMJIT_PROPAGATE(buildViews());
+ ASMJIT_PROPAGATE(removeUnreachableBlocks());
+
+ ASMJIT_PROPAGATE(buildDominators());
+ ASMJIT_PROPAGATE(buildLiveness());
+ ASMJIT_PROPAGATE(assignArgIndexToWorkRegs());
+
+#ifndef ASMJIT_NO_LOGGING
+ if (logger() && logger()->hasFlag(FormatOptions::kFlagAnnotations))
+ ASMJIT_PROPAGATE(annotateCode());
+#endif
+
+ ASMJIT_PROPAGATE(runGlobalAllocator());
+ ASMJIT_PROPAGATE(runLocalAllocator());
+
+ ASMJIT_PROPAGATE(updateStackFrame());
+ ASMJIT_PROPAGATE(insertPrologEpilog());
+
+ ASMJIT_PROPAGATE(rewrite());
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Basic Block Management]
+// ============================================================================
+
+RABlock* RAPass::newBlock(BaseNode* initialNode) noexcept {
+ RABlock* block = zone()->newT<RABlock>(this);
+ if (ASMJIT_UNLIKELY(!block))
+ return nullptr;
+
+ block->setFirst(initialNode);
+ block->setLast(initialNode);
+
+ _createdBlockCount++;
+ return block;
+}
+
+RABlock* RAPass::newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt) noexcept {
+ if (cbLabel->hasPassData())
+ return cbLabel->passData<RABlock>();
+
+ FuncNode* func = this->func();
+ BaseNode* node = cbLabel->prev();
+ RABlock* block = nullptr;
+
+ // Scan backwards for a label, but terminate the loop on any code node. We
+ // try hard to coalesce two or more consecutive labels, or labels separated
+ // only by non-code nodes, into a single basic block.
+ //
+ // Possible cases that would share the same basic block:
+ //
+ // 1. Two or more consecutive labels:
+ // Label1:
+ // Label2:
+ //
+ // 2. Two or more labels separated by non-code nodes:
+ // Label1:
+ // ; Some comment...
+ // .align 16
+ // Label2:
+ size_t nPendingLabels = 0;
+
+ while (node) {
+ if (node->type() == BaseNode::kNodeLabel) {
+ // A function node has a different NodeType (`kNodeFunc`), so it can never
+ // match here; assert that, as we must never associate a basic block with
+ // the `func` node itself.
+ ASMJIT_ASSERT(node != func);
+
+ block = node->passData<RABlock>();
+ if (block) {
+ // The exit node always has a block associated with it. If we get here it
+ // means that the `cbLabel` passed to this function is after the end of
+ // the function and cannot be merged with the function's exit block.
+ if (node == func->exitNode())
+ block = nullptr;
+ break;
+ }
+
+ nPendingLabels++;
+ }
+ else if (node->type() == BaseNode::kNodeAlign) {
+ // Align node is fine.
+ }
+ else {
+ break;
+ }
+
+ node = node->prev();
+ }
+
+ if (stoppedAt)
+ *stoppedAt = node;
+
+ if (!block) {
+ block = newBlock();
+ if (ASMJIT_UNLIKELY(!block))
+ return nullptr;
+ }
+
+ cbLabel->setPassData<RABlock>(block);
+ node = cbLabel;
+
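+ // Back-fill the labels we counted above so they all map to this basic block.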
+ while (nPendingLabels) {
+ node = node->prev();
+ for (;;) {
+ if (node->type() == BaseNode::kNodeLabel) {
+ node->setPassData<RABlock>(block);
+ nPendingLabels--;
+ break;
+ }
+
+ node = node->prev();
+ ASMJIT_ASSERT(node != nullptr);
+ }
+ }
+
+ if (!block->first()) {
+ block->setFirst(node);
+ block->setLast(cbLabel);
+ }
+
+ return block;
+}
+
+Error RAPass::addBlock(RABlock* block) noexcept {
+ ASMJIT_PROPAGATE(_blocks.willGrow(allocator()));
+
+ block->_blockId = blockCount();
+ _blocks.appendUnsafe(block);
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Build]
+// ============================================================================
+
+Error RAPass::initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept {
+ if (sharedAssignmentsMap.empty())
+ return kErrorOk;
+
+ uint32_t count = 0;
+ for (RABlock* block : _blocks) {
+ if (block->hasSharedAssignmentId()) {
+ uint32_t sharedAssignmentId = sharedAssignmentsMap[block->sharedAssignmentId()];
+ block->setSharedAssignmentId(sharedAssignmentId);
+ count = Support::max(count, sharedAssignmentId + 1);
+ }
+ }
+
+ ASMJIT_PROPAGATE(_sharedAssignments.resize(allocator(), count));
+
+ // Aggregate all entry scratch GP regs from blocks of the same assignment to
+ // the assignment itself. It will then be used instead of RABlock's own scratch
+ // regs mask, as shared assignments have precedence.
+ for (RABlock* block : _blocks) {
+ if (block->hasSharedAssignmentId()) {
+ RASharedAssignment& sa = _sharedAssignments[block->sharedAssignmentId()];
+ sa.addScratchGpRegs(block->_entryScratchGpRegs);
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Views Order]
+// ============================================================================
+
+class RABlockVisitItem {
+public:
+ inline RABlockVisitItem(RABlock* block, uint32_t index) noexcept
+ : _block(block),
+ _index(index) {}
+
+ inline RABlockVisitItem(const RABlockVisitItem& other) noexcept
+ : _block(other._block),
+ _index(other._index) {}
+
+ inline RABlockVisitItem& operator=(const RABlockVisitItem& other) noexcept = default;
+
+ inline RABlock* block() const noexcept { return _block; }
+ inline uint32_t index() const noexcept { return _index; }
+
+ RABlock* _block;
+ uint32_t _index;
+};
+
+Error RAPass::buildViews() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ ASMJIT_RA_LOG_FORMAT("[RAPass::BuildViews]\n");
+#endif
+
+ uint32_t count = blockCount();
+ if (ASMJIT_UNLIKELY(!count)) return kErrorOk;
+
+ ASMJIT_PROPAGATE(_pov.reserve(allocator(), count));
+
+ ZoneStack<RABlockVisitItem> stack;
+ ASMJIT_PROPAGATE(stack.init(allocator()));
+
+ ZoneBitVector visited;
+ ASMJIT_PROPAGATE(visited.resize(allocator(), count));
+
+ RABlock* current = _blocks[0];
+ uint32_t i = 0;
+
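+ // Iterative DFS - `i` indexes the next successor of `current` to visit.
+ // A block is appended to `_pov` only after all its successors have been
+ // processed, which yields a post-order view of the CFG.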
+ for (;;) {
+ for (;;) {
+ if (i >= current->successors().size())
+ break;
+
+ // Skip if already visited.
+ RABlock* child = current->successors()[i++];
+ if (visited.bitAt(child->blockId()))
+ continue;
+
+ // Mark as visited to prevent visiting the same block multiple times.
+ visited.setBit(child->blockId(), true);
+
+ // Add the current block to the stack; we will get back to it later.
+ ASMJIT_PROPAGATE(stack.append(RABlockVisitItem(current, i)));
+ current = child;
+ i = 0;
+ }
+
+ current->makeReachable();
+ current->_povOrder = _pov.size();
+ _pov.appendUnsafe(current);
+
+ if (stack.empty())
+ break;
+
+ RABlockVisitItem top = stack.pop();
+ current = top.block();
+ i = top.index();
+ }
+
+ ASMJIT_RA_LOG_COMPLEX({
+ StringTmp<1024> sb;
+ for (RABlock* block : blocks()) {
+ sb.clear();
+ if (block->hasSuccessors()) {
+ sb.appendFormat(" #%u -> {", block->blockId());
+ _dumpBlockIds(sb, block->successors());
+ sb.appendString("}\n");
+ }
+ else {
+ sb.appendFormat(" #%u -> {Exit}\n", block->blockId());
+ }
+ logger->log(sb);
+ }
+ });
+
+ visited.release(allocator());
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Dominators]
+// ============================================================================
+
+static ASMJIT_INLINE RABlock* intersectBlocks(RABlock* b1, RABlock* b2) noexcept {
+ while (b1 != b2) {
+ while (b2->povOrder() > b1->povOrder()) b1 = b1->iDom();
+ while (b1->povOrder() > b2->povOrder()) b2 = b2->iDom();
+ }
+ return b1;
+}
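+
+// A worked example of the intersection above (illustrative only): assume a
+// diamond CFG `entry -> {b1, b2} -> merge` with post-order indexes assigned
+// by `buildViews()` as merge=0, b1=1, b2=2, entry=3, and with iDom(b1) ==
+// iDom(b2) == entry. Then `intersectBlocks(b1, b2)` proceeds as follows:
+//
+//   povOrder(b2)=2 > povOrder(b1)=1 -> b1 = iDom(b1) = entry
+//   povOrder(b1)=3 > povOrder(b2)=2 -> b2 = iDom(b2) = entry
+//   b1 == b2                        -> return entry
+//
+// so `entry` becomes the immediate dominator of `merge`.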
+
+// Based on "A Simple, Fast Dominance Algorithm" by Cooper, Harvey, and Kennedy.
+Error RAPass::buildDominators() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ ASMJIT_RA_LOG_FORMAT("[RAPass::BuildDominators]\n");
+#endif
+
+ if (_blocks.empty())
+ return kErrorOk;
+
+ RABlock* entryBlock = this->entryBlock();
+ entryBlock->setIDom(entryBlock);
+
+ bool changed = true;
+ uint32_t nIters = 0;
+
+ while (changed) {
+ nIters++;
+ changed = false;
+
+ uint32_t i = _pov.size();
+ while (i) {
+ RABlock* block = _pov[--i];
+ if (block == entryBlock)
+ continue;
+
+ RABlock* iDom = nullptr;
+ const RABlocks& preds = block->predecessors();
+
+ uint32_t j = preds.size();
+ while (j) {
+ RABlock* p = preds[--j];
+ if (!p->iDom()) continue;
+ iDom = !iDom ? p : intersectBlocks(iDom, p);
+ }
+
+ if (block->iDom() != iDom) {
+ ASMJIT_RA_LOG_FORMAT(" IDom of #%u -> #%u\n", block->blockId(), iDom->blockId());
+ block->setIDom(iDom);
+ changed = true;
+ }
+ }
+ }
+
+ ASMJIT_RA_LOG_FORMAT(" Done (%u iterations)\n", nIters);
+ return kErrorOk;
+}
+
+bool RAPass::_strictlyDominates(const RABlock* a, const RABlock* b) const noexcept {
+ ASMJIT_ASSERT(a != nullptr); // There must be at least one block if this function is
+ ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
+ ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `strictlyDominates()`.
+
+ // Nothing strictly dominates the entry block.
+ const RABlock* entryBlock = this->entryBlock();
+ if (a == entryBlock)
+ return false;
+
+ const RABlock* iDom = b->iDom();
+ while (iDom != a && iDom != entryBlock)
+ iDom = iDom->iDom();
+
+ return iDom != entryBlock;
+}
+
+const RABlock* RAPass::_nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept {
+ ASMJIT_ASSERT(a != nullptr); // There must be at least one block if this function is
+ ASMJIT_ASSERT(b != nullptr); // called, as both `a` and `b` must be valid blocks.
+ ASMJIT_ASSERT(a != b); // Checked by `dominates()` and `strictlyDominates()`.
+
+ // Defensive check - the assertion above compiles out in release builds.
+ if (a == b)
+ return a;
+
+ // If `a` strictly dominates `b` then `a` is the nearest common dominator.
+ if (_strictlyDominates(a, b))
+ return a;
+
+ // If `b` strictly dominates `a` then `b` is the nearest common dominator.
+ if (_strictlyDominates(b, a))
+ return b;
+
+ const RABlock* entryBlock = this->entryBlock();
+ uint64_t timestamp = nextTimestamp();
+
+ // Mark all A's dominators.
+ const RABlock* block = a->iDom();
+ while (block != entryBlock) {
+ block->setTimestamp(timestamp);
+ block = block->iDom();
+ }
+
+ // Check all B's dominators against marked dominators of A.
+ block = b->iDom();
+ while (block != entryBlock) {
+ if (block->hasTimestamp(timestamp))
+ return block;
+ block = block->iDom();
+ }
+
+ return entryBlock;
+}
+
+// ============================================================================
+// [asmjit::RAPass - CFG - Utilities]
+// ============================================================================
+
+Error RAPass::removeUnreachableBlocks() noexcept {
+ uint32_t numAllBlocks = blockCount();
+ uint32_t numReachableBlocks = reachableBlockCount();
+
+ // All reachable -> nothing to do.
+ if (numAllBlocks == numReachableBlocks)
+ return kErrorOk;
+
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ ASMJIT_RA_LOG_FORMAT("[RAPass::RemoveUnreachableBlocks (%u of %u unreachable)]\n", numAllBlocks - numReachableBlocks, numAllBlocks);
+#endif
+
+ for (uint32_t i = 0; i < numAllBlocks; i++) {
+ RABlock* block = _blocks[i];
+ if (block->isReachable())
+ continue;
+
+ ASMJIT_RA_LOG_FORMAT(" Removing block {%u}\n", i);
+ BaseNode* first = block->first();
+ BaseNode* last = block->last();
+
+ BaseNode* beforeFirst = first->prev();
+ BaseNode* afterLast = last->next();
+
+ BaseNode* node = first;
+ while (node != afterLast) {
+ BaseNode* next = node->next();
+
+ if (node->isCode() || node->isRemovable())
+ cc()->removeNode(node);
+ node = next;
+ }
+
+ if (beforeFirst->next() == afterLast) {
+ block->setFirst(nullptr);
+ block->setLast(nullptr);
+ }
+ else {
+ block->setFirst(beforeFirst->next());
+ block->setLast(afterLast->prev());
+ }
+ }
+
+ return kErrorOk;
+}
+
+BaseNode* RAPass::findSuccessorStartingAt(BaseNode* node) noexcept {
+ while (node && (node->isInformative() || node->hasNoEffect()))
+ node = node->next();
+ return node;
+}
+
+bool RAPass::isNextTo(BaseNode* node, BaseNode* target) noexcept {
+ for (;;) {
+ node = node->next();
+ if (node == target)
+ return true;
+
+ if (!node)
+ return false;
+
+ if (node->isCode() || node->isData())
+ return false;
+ }
+}
+
+// ============================================================================
+// [asmjit::RAPass - Registers - Management]
+// ============================================================================
+
+Error RAPass::_asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
+ // Checked by `asWorkReg()` - must be true.
+ ASMJIT_ASSERT(vReg->_workReg == nullptr);
+
+ uint32_t group = vReg->group();
+ ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
+
+ RAWorkRegs& wRegs = workRegs();
+ RAWorkRegs& wRegsByGroup = workRegs(group);
+
+ ASMJIT_PROPAGATE(wRegs.willGrow(allocator()));
+ ASMJIT_PROPAGATE(wRegsByGroup.willGrow(allocator()));
+
+ RAWorkReg* wReg = zone()->newT<RAWorkReg>(vReg, wRegs.size());
+ if (ASMJIT_UNLIKELY(!wReg))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ vReg->setWorkReg(wReg);
+ if (!vReg->isStack())
+ wReg->setRegByteMask(Support::lsbMask<uint64_t>(vReg->virtSize()));
+ wRegs.appendUnsafe(wReg);
+ wRegsByGroup.appendUnsafe(wReg);
+
+ // Only used by RA logging.
+ _maxWorkRegNameSize = Support::max(_maxWorkRegNameSize, vReg->nameSize());
+
+ *out = wReg;
+ return kErrorOk;
+}
+
+RAAssignment::WorkToPhysMap* RAPass::newWorkToPhysMap() noexcept {
+ uint32_t count = workRegCount();
+ size_t size = WorkToPhysMap::sizeOf(count);
+
+ // If no registers are used the size could be zero; in that case return a
+ // dummy map instead of NULL.
+ if (ASMJIT_UNLIKELY(!size)) {
+ static const RAAssignment::WorkToPhysMap nullMap = {{ 0 }};
+ return const_cast<RAAssignment::WorkToPhysMap*>(&nullMap);
+ }
+
+ WorkToPhysMap* map = zone()->allocT<WorkToPhysMap>(size);
+ if (ASMJIT_UNLIKELY(!map))
+ return nullptr;
+
+ map->reset(count);
+ return map;
+}
+
+RAAssignment::PhysToWorkMap* RAPass::newPhysToWorkMap() noexcept {
+ uint32_t count = physRegTotal();
+ size_t size = PhysToWorkMap::sizeOf(count);
+
+ PhysToWorkMap* map = zone()->allocT<PhysToWorkMap>(size);
+ if (ASMJIT_UNLIKELY(!map))
+ return nullptr;
+
+ map->reset(count);
+ return map;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Registers - Liveness Analysis and Statistics]
+// ============================================================================
+
+namespace LiveOps {
+ typedef ZoneBitVector::BitWord BitWord;
+
+ struct In {
+ static ASMJIT_INLINE BitWord op(BitWord dst, BitWord out, BitWord gen, BitWord kill) noexcept {
+ DebugUtils::unused(dst);
+ return (out | gen) & ~kill;
+ }
+ };
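+
+ // The `In` operator evaluates the backward liveness equation
+ //
+ //   liveIn(B) = (liveOut(B) | gen(B)) & ~kill(B)
+ //
+ // one BitWord at a time. The kill bit intentionally wins: during GEN/KILL
+ // construction a register whose first access in a block is a write-only
+ // def keeps its kill bit set, so it is never considered live on entry.
+ // `recalcInOut()` below folds liveOut(B) from the liveIn of all successors
+ // and then reapplies this equation until a fixed point is reached.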
+
+ template<typename Operator>
+ static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, uint32_t n) noexcept {
+ BitWord changed = 0;
+
+ for (uint32_t i = 0; i < n; i++) {
+ BitWord before = dst[i];
+ BitWord after = Operator::op(before, a[i]);
+
+ dst[i] = after;
+ changed |= (before ^ after);
+ }
+
+ return changed != 0;
+ }
+
+ template<typename Operator>
+ static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, uint32_t n) noexcept {
+ BitWord changed = 0;
+
+ for (uint32_t i = 0; i < n; i++) {
+ BitWord before = dst[i];
+ BitWord after = Operator::op(before, a[i], b[i]);
+
+ dst[i] = after;
+ changed |= (before ^ after);
+ }
+
+ return changed != 0;
+ }
+
+ template<typename Operator>
+ static ASMJIT_INLINE bool op(BitWord* dst, const BitWord* a, const BitWord* b, const BitWord* c, uint32_t n) noexcept {
+ BitWord changed = 0;
+
+ for (uint32_t i = 0; i < n; i++) {
+ BitWord before = dst[i];
+ BitWord after = Operator::op(before, a[i], b[i], c[i]);
+
+ dst[i] = after;
+ changed |= (before ^ after);
+ }
+
+ return changed != 0;
+ }
+
+ static ASMJIT_INLINE bool recalcInOut(RABlock* block, uint32_t numBitWords, bool initial = false) noexcept {
+ bool changed = initial;
+
+ const RABlocks& successors = block->successors();
+ uint32_t numSuccessors = successors.size();
+
+ // Calculate `OUT` based on `IN` of all successors.
+ for (uint32_t i = 0; i < numSuccessors; i++)
+ changed |= op<Support::Or>(block->liveOut().data(), successors[i]->liveIn().data(), numBitWords);
+
+ // Calculate `IN` based on `OUT`, `GEN`, and `KILL` bits.
+ if (changed)
+ changed = op<In>(block->liveIn().data(), block->liveOut().data(), block->gen().data(), block->kill().data(), numBitWords);
+
+ return changed;
+ }
+}
+
+ASMJIT_FAVOR_SPEED Error RAPass::buildLiveness() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ StringTmp<512> sb;
+#endif
+
+ ASMJIT_RA_LOG_FORMAT("[RAPass::BuildLiveness]\n");
+
+ uint32_t i;
+
+ uint32_t numAllBlocks = blockCount();
+ uint32_t numReachableBlocks = reachableBlockCount();
+
+ uint32_t numVisits = numReachableBlocks;
+ uint32_t numWorkRegs = workRegCount();
+ uint32_t numBitWords = ZoneBitVector::_wordsPerBits(numWorkRegs);
+
+ if (!numWorkRegs) {
+ ASMJIT_RA_LOG_FORMAT(" Done (no virtual registers)\n");
+ return kErrorOk;
+ }
+
+ ZoneVector<uint32_t> nUsesPerWorkReg; // Number of USEs of each RAWorkReg.
+ ZoneVector<uint32_t> nOutsPerWorkReg; // Number of OUTs of each RAWorkReg.
+ ZoneVector<uint32_t> nInstsPerBlock; // Number of instructions of each RABlock.
+
+ ASMJIT_PROPAGATE(nUsesPerWorkReg.resize(allocator(), numWorkRegs));
+ ASMJIT_PROPAGATE(nOutsPerWorkReg.resize(allocator(), numWorkRegs));
+ ASMJIT_PROPAGATE(nInstsPerBlock.resize(allocator(), numAllBlocks));
+
+ // --------------------------------------------------------------------------
+ // Calculate GEN/KILL of each block.
+ // --------------------------------------------------------------------------
+
+ for (i = 0; i < numReachableBlocks; i++) {
+ RABlock* block = _pov[i];
+ ASMJIT_PROPAGATE(block->resizeLiveBits(numWorkRegs));
+
+ BaseNode* node = block->last();
+ BaseNode* stop = block->first();
+
+ uint32_t nInsts = 0;
+ for (;;) {
+ if (node->isInst()) {
+ InstNode* inst = node->as<InstNode>();
+ RAInst* raInst = inst->passData<RAInst>();
+ ASMJIT_ASSERT(raInst != nullptr);
+
+ RATiedReg* tiedRegs = raInst->tiedRegs();
+ uint32_t count = raInst->tiedCount();
+
+ for (uint32_t j = 0; j < count; j++) {
+ RATiedReg* tiedReg = &tiedRegs[j];
+ uint32_t workId = tiedReg->workId();
+
+ // Update `nUses` and `nOuts`.
+ nUsesPerWorkReg[workId] += 1u;
+ nOutsPerWorkReg[workId] += uint32_t(tiedReg->isWrite());
+
+ // Mark as:
+ // KILL - if this VirtReg is killed afterwards.
+ // LAST - if this VirtReg is last in this basic block.
+ if (block->kill().bitAt(workId))
+ tiedReg->addFlags(RATiedReg::kKill);
+ else if (!block->gen().bitAt(workId))
+ tiedReg->addFlags(RATiedReg::kLast);
+
+ if (tiedReg->isWriteOnly()) {
+ // KILL.
+ block->kill().setBit(workId, true);
+ }
+ else {
+ // GEN.
+ block->kill().setBit(workId, false);
+ block->gen().setBit(workId, true);
+ }
+ }
+
+ nInsts++;
+ }
+
+ if (node == stop)
+ break;
+
+ node = node->prev();
+ ASMJIT_ASSERT(node != nullptr);
+ }
+
+ nInstsPerBlock[block->blockId()] = nInsts;
+ }
+
+ // --------------------------------------------------------------------------
+ // Calculate IN/OUT of each block.
+ // --------------------------------------------------------------------------
+
+ {
+ ZoneStack<RABlock*> workList;
+ ZoneBitVector workBits;
+
+ ASMJIT_PROPAGATE(workList.init(allocator()));
+ ASMJIT_PROPAGATE(workBits.resize(allocator(), blockCount(), true));
+
+ for (i = 0; i < numReachableBlocks; i++) {
+ RABlock* block = _pov[i];
+ LiveOps::recalcInOut(block, numBitWords, true);
+ ASMJIT_PROPAGATE(workList.append(block));
+ }
+
+ while (!workList.empty()) {
+ RABlock* block = workList.popFirst();
+ uint32_t blockId = block->blockId();
+
+ workBits.setBit(blockId, false);
+ if (LiveOps::recalcInOut(block, numBitWords)) {
+ const RABlocks& predecessors = block->predecessors();
+ uint32_t numPredecessors = predecessors.size();
+
+ for (uint32_t j = 0; j < numPredecessors; j++) {
+ RABlock* pred = predecessors[j];
+ if (!workBits.bitAt(pred->blockId())) {
+ workBits.setBit(pred->blockId(), true);
+ ASMJIT_PROPAGATE(workList.append(pred));
+ }
+ }
+ }
+ numVisits++;
+ }
+
+ workList.reset();
+ workBits.release(allocator());
+ }
+
+ ASMJIT_RA_LOG_COMPLEX({
+ logger->logf(" LiveIn/Out Done (%u visits)\n", numVisits);
+ for (i = 0; i < numAllBlocks; i++) {
+ RABlock* block = _blocks[i];
+
+ ASMJIT_PROPAGATE(sb.assignFormat(" {#%u}\n", block->blockId()));
+ ASMJIT_PROPAGATE(_dumpBlockLiveness(sb, block));
+
+ logger->log(sb);
+ }
+ });
+
+ // --------------------------------------------------------------------------
+ // Reserve the space in each `RAWorkReg` for references.
+ // --------------------------------------------------------------------------
+
+ for (i = 0; i < numWorkRegs; i++) {
+ RAWorkReg* workReg = workRegById(i);
+ ASMJIT_PROPAGATE(workReg->_refs.reserve(allocator(), nUsesPerWorkReg[i]));
+ ASMJIT_PROPAGATE(workReg->_writes.reserve(allocator(), nOutsPerWorkReg[i]));
+ }
+
+ // --------------------------------------------------------------------------
+ // Assign block and instruction positions, build LiveCount and LiveSpans.
+ // --------------------------------------------------------------------------
+
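+ // Positions are assigned in pairs - each instruction occupies an even
+ // USE position (reads) and the following odd OUT position (writes). This
+ // is why live spans open at `position + !tiedReg->isRead()` below: a
+ // write-only register only becomes live at its OUT sub-position.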
+ uint32_t position = 2;
+ for (i = 0; i < numAllBlocks; i++) {
+ RABlock* block = _blocks[i];
+ if (!block->isReachable())
+ continue;
+
+ BaseNode* node = block->first();
+ BaseNode* stop = block->last();
+
+ uint32_t endPosition = position + nInstsPerBlock[i] * 2;
+ block->setFirstPosition(position);
+ block->setEndPosition(endPosition);
+
+ RALiveCount curLiveCount;
+ RALiveCount maxLiveCount;
+
+ // Process LIVE-IN.
+ ZoneBitVector::ForEachBitSet it(block->liveIn());
+ while (it.hasNext()) {
+ RAWorkReg* workReg = _workRegs[uint32_t(it.next())];
+ curLiveCount[workReg->group()]++;
+ ASMJIT_PROPAGATE(workReg->liveSpans().openAt(allocator(), position, endPosition));
+ }
+
+ for (;;) {
+ if (node->isInst()) {
+ InstNode* inst = node->as<InstNode>();
+ RAInst* raInst = inst->passData<RAInst>();
+ ASMJIT_ASSERT(raInst != nullptr);
+
+ RATiedReg* tiedRegs = raInst->tiedRegs();
+ uint32_t count = raInst->tiedCount();
+
+ inst->setPosition(position);
+ raInst->_liveCount = curLiveCount;
+
+ for (uint32_t j = 0; j < count; j++) {
+ RATiedReg* tiedReg = &tiedRegs[j];
+ uint32_t workId = tiedReg->workId();
+
+ // Create refs and writes.
+ RAWorkReg* workReg = workRegById(workId);
+ workReg->_refs.appendUnsafe(node);
+ if (tiedReg->isWrite())
+ workReg->_writes.appendUnsafe(node);
+
+ // We couldn't calculate this in previous steps, but since we know all LIVE-OUT
+ // at this point it becomes trivial. If this is the last instruction that uses
+ // this `workReg` and it's not LIVE-OUT then it is KILLed here.
+ if (tiedReg->isLast() && !block->liveOut().bitAt(workId))
+ tiedReg->addFlags(RATiedReg::kKill);
+
+ LiveRegSpans& liveSpans = workReg->liveSpans();
+ bool wasOpen;
+ ASMJIT_PROPAGATE(liveSpans.openAt(allocator(), position + !tiedReg->isRead(), endPosition, wasOpen));
+
+ uint32_t group = workReg->group();
+ if (!wasOpen) {
+ curLiveCount[group]++;
+ raInst->_liveCount[group]++;
+ }
+
+ if (tiedReg->isKill()) {
+ liveSpans.closeAt(position + !tiedReg->isRead() + 1);
+ curLiveCount[group]--;
+ }
+
+ // Update `RAWorkReg::hintRegId`.
+ if (tiedReg->hasUseId() && !workReg->hasHintRegId()) {
+ uint32_t useId = tiedReg->useId();
+ if (!(raInst->_clobberedRegs[group] & Support::bitMask(useId)))
+ workReg->setHintRegId(useId);
+ }
+
+ // Update `RAWorkReg::clobberedSurvivalMask`.
+ if (raInst->_clobberedRegs[group] && !tiedReg->isOutOrKill())
+ workReg->addClobberSurvivalMask(raInst->_clobberedRegs[group]);
+ }
+
+ position += 2;
+ maxLiveCount.op<Support::Max>(raInst->_liveCount);
+ }
+
+ if (node == stop)
+ break;
+
+ node = node->next();
+ ASMJIT_ASSERT(node != nullptr);
+ }
+
+ block->_maxLiveCount = maxLiveCount;
+ _globalMaxLiveCount.op<Support::Max>(maxLiveCount);
+ ASMJIT_ASSERT(position == block->endPosition());
+ }
+
+ // --------------------------------------------------------------------------
+ // Calculate WorkReg statistics.
+ // --------------------------------------------------------------------------
+
+ for (i = 0; i < numWorkRegs; i++) {
+ RAWorkReg* workReg = _workRegs[i];
+
+ LiveRegSpans& spans = workReg->liveSpans();
+ uint32_t width = spans.width();
+ float freq = width ? float(double(workReg->_refs.size()) / double(width)) : float(0);
+
+ RALiveStats& stats = workReg->liveStats();
+ stats._width = width;
+ stats._freq = freq;
+ stats._priority = freq + float(int(workReg->virtReg()->weight())) * 0.01f;
+ }
+
+ ASMJIT_RA_LOG_COMPLEX({
+ sb.clear();
+ _dumpLiveSpans(sb);
+ logger->log(sb);
+ });
+
+ nUsesPerWorkReg.release(allocator());
+ nOutsPerWorkReg.release(allocator());
+ nInstsPerBlock.release(allocator());
+
+ return kErrorOk;
+}
+
+Error RAPass::assignArgIndexToWorkRegs() noexcept {
+ ZoneBitVector& liveIn = entryBlock()->liveIn();
+ uint32_t argCount = func()->argCount();
+
+ for (uint32_t i = 0; i < argCount; i++) {
+ // Unassigned argument.
+ VirtReg* virtReg = func()->arg(i);
+ if (!virtReg) continue;
+
+ // Unreferenced argument.
+ RAWorkReg* workReg = virtReg->workReg();
+ if (!workReg) continue;
+
+ // Overwritten argument.
+ uint32_t workId = workReg->workId();
+ if (!liveIn.bitAt(workId))
+ continue;
+
+ workReg->setArgIndex(i);
+
+ const FuncValue& arg = func()->detail().arg(i);
+ if (arg.isReg() && _archRegsInfo->regInfo[arg.regType()].group() == workReg->group()) {
+ workReg->setHintRegId(arg.regId());
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Global]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+static void RAPass_dumpSpans(String& sb, uint32_t index, const LiveRegSpans& liveSpans) noexcept {
+ sb.appendFormat(" %02u: ", index);
+
+ for (uint32_t i = 0; i < liveSpans.size(); i++) {
+ const LiveRegSpan& liveSpan = liveSpans[i];
+ if (i) sb.appendString(", ");
+ sb.appendFormat("[%u:%u@%u]", liveSpan.a, liveSpan.b, liveSpan.id);
+ }
+
+ sb.appendChar('\n');
+}
+#endif
+
+Error RAPass::runGlobalAllocator() noexcept {
+ ASMJIT_PROPAGATE(initGlobalLiveSpans());
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ ASMJIT_PROPAGATE(binPack(group));
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SPEED Error RAPass::initGlobalLiveSpans() noexcept {
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ size_t physCount = _physRegCount[group];
+ LiveRegSpans* liveSpans = allocator()->allocT<LiveRegSpans>(physCount * sizeof(LiveRegSpans));
+
+ if (ASMJIT_UNLIKELY(!liveSpans))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ for (size_t physId = 0; physId < physCount; physId++)
+ new(&liveSpans[physId]) LiveRegSpans();
+
+ _globalLiveSpans[group] = liveSpans;
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SPEED Error RAPass::binPack(uint32_t group) noexcept {
+ if (workRegCount(group) == 0)
+ return kErrorOk;
+
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ StringTmp<512> sb;
+
+ ASMJIT_RA_LOG_FORMAT("[RAPass::BinPack] Available=%u (0x%08X) Count=%u\n",
+ Support::popcnt(_availableRegs[group]),
+ _availableRegs[group],
+ workRegCount(group));
+#endif
+
+ uint32_t i;
+ uint32_t physCount = _physRegCount[group];
+
+ RAWorkRegs workRegs;
+ LiveRegSpans tmpSpans;
+
+ ASMJIT_PROPAGATE(workRegs.concat(allocator(), this->workRegs(group)));
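+
+ // Sort the work registers by descending allocation priority (a frequency
+ // and weight based metric computed by `buildLiveness()`) so the most
+ // contended live ranges are bin-packed first.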
+ workRegs.sort([](const RAWorkReg* a, const RAWorkReg* b) noexcept {
+ return b->liveStats().priority() - a->liveStats().priority();
+ });
+
+ uint32_t numWorkRegs = workRegs.size();
+ uint32_t availableRegs = _availableRegs[group];
+
+ // First try to pack everything that provides register-id hint as these are
+ // most likely function arguments and fixed (precolored) virtual registers.
+ if (!workRegs.empty()) {
+ uint32_t dstIndex = 0;
+
+ for (i = 0; i < numWorkRegs; i++) {
+ RAWorkReg* workReg = workRegs[i];
+ if (workReg->hasHintRegId()) {
+ uint32_t physId = workReg->hintRegId();
+ if (availableRegs & Support::bitMask(physId)) {
+ LiveRegSpans& live = _globalLiveSpans[group][physId];
+ Error err = tmpSpans.nonOverlappingUnionOf(allocator(), live, workReg->liveSpans(), LiveRegData(workReg->virtId()));
+
+ if (err == kErrorOk) {
+ workReg->setHomeRegId(physId);
+ live.swap(tmpSpans);
+ continue;
+ }
+
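+ // Any error other than the special "overlap" marker (0xFFFFFFFFu)
+ // returned by `nonOverlappingUnionOf()` is a real failure.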
+ if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
+ return err;
+ }
+ }
+
+ workRegs[dstIndex++] = workReg;
+ }
+
+ workRegs._setSize(dstIndex);
+ numWorkRegs = dstIndex;
+ }
+
+ // Try to pack the rest.
+ if (!workRegs.empty()) {
+ uint32_t dstIndex = 0;
+
+ for (i = 0; i < numWorkRegs; i++) {
+ RAWorkReg* workReg = workRegs[i];
+ uint32_t physRegs = availableRegs;
+
+ while (physRegs) {
+ uint32_t physId = Support::ctz(physRegs);
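+
+ // Pick from the preferred mask accumulated during liveness analysis
+ // (`clobberSurvivalMask`) when possible; otherwise keep the lowest
+ // available physical id selected above.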
+ if (workReg->clobberSurvivalMask()) {
+ uint32_t preferredMask = physRegs & workReg->clobberSurvivalMask();
+ if (preferredMask)
+ physId = Support::ctz(preferredMask);
+ }
+
+ LiveRegSpans& live = _globalLiveSpans[group][physId];
+ Error err = tmpSpans.nonOverlappingUnionOf(allocator(), live, workReg->liveSpans(), LiveRegData(workReg->virtId()));
+
+ if (err == kErrorOk) {
+ workReg->setHomeRegId(physId);
+ live.swap(tmpSpans);
+ break;
+ }
+
+ if (ASMJIT_UNLIKELY(err != 0xFFFFFFFFu))
+ return err;
+
+ physRegs ^= Support::bitMask(physId);
+ }
+
+ // Keep it in `workRegs` if it was not allocated.
+ if (!physRegs)
+ workRegs[dstIndex++] = workReg;
+ }
+
+ workRegs._setSize(dstIndex);
+ numWorkRegs = dstIndex;
+ }
+
+ ASMJIT_RA_LOG_COMPLEX({
+ for (uint32_t physId = 0; physId < physCount; physId++) {
+ LiveRegSpans& live = _globalLiveSpans[group][physId];
+ if (live.empty())
+ continue;
+
+ sb.clear();
+ RAPass_dumpSpans(sb, physId, live);
+ logger->log(sb);
+ }
+ });
+
+ // Maybe unused if logging is disabled.
+ DebugUtils::unused(physCount);
+
+ if (workRegs.empty()) {
+ ASMJIT_RA_LOG_FORMAT(" Completed.\n");
+ }
+ else {
+ _strategy[group].setType(RAStrategy::kStrategyComplex);
+ for (RAWorkReg* workReg : workRegs)
+ workReg->markStackPreferred();
+
+ ASMJIT_RA_LOG_COMPLEX({
+ uint32_t count = workRegs.size();
+ sb.clear();
+ sb.appendFormat(" Unassigned (%u): ", count);
+ for (i = 0; i < numWorkRegs; i++) {
+ RAWorkReg* workReg = workRegs[i];
+ if (i) sb.appendString(", ");
+ sb.appendString(workReg->name());
+ }
+ sb.appendChar('\n');
+ logger->log(sb);
+ });
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Local]
+// ============================================================================
+
+Error RAPass::runLocalAllocator() noexcept {
+ RALocalAllocator lra(this);
+ ASMJIT_PROPAGATE(lra.init());
+
+ if (!blockCount())
+ return kErrorOk;
+
+ // The allocation is done when this reaches zero.
+ uint32_t blocksRemaining = reachableBlockCount();
+
+ // Current block.
+ uint32_t blockId = 0;
+ RABlock* block = _blocks[blockId];
+
+ // The first block (entry) must always be reachable.
+ ASMJIT_ASSERT(block->isReachable());
+
+ // Assign function arguments for the initial block. The `lra` is valid now.
+ lra.makeInitialAssignment();
+ ASMJIT_PROPAGATE(setBlockEntryAssignment(block, block, lra._curAssignment));
+
+ // The loop starts from the first block and iterates blocks in order; however,
+ // the algorithm can also jump to any other block once it finishes, if that
+ // block is a jump target. In-order iteration just makes sure that all blocks
+ // are visited.
+ for (;;) {
+ BaseNode* first = block->first();
+ BaseNode* last = block->last();
+ BaseNode* terminator = block->hasTerminator() ? last : nullptr;
+
+ BaseNode* beforeFirst = first->prev();
+ BaseNode* afterLast = last->next();
+
+ bool unconditionalJump = false;
+ RABlock* consecutive = nullptr;
+
+ if (block->hasSuccessors())
+ consecutive = block->successors()[0];
+
+ lra.setBlock(block);
+ block->makeAllocated();
+
+ BaseNode* node = first;
+ while (node != afterLast) {
+ BaseNode* next = node->next();
+ if (node->isInst()) {
+ InstNode* inst = node->as<InstNode>();
+
+ if (ASMJIT_UNLIKELY(inst == terminator)) {
+ const RABlocks& successors = block->successors();
+ if (block->hasConsecutive()) {
+ ASMJIT_PROPAGATE(lra.allocBranch(inst, successors.last(), successors.first()));
+
+ node = next;
+ continue;
+ }
+ else if (successors.size() > 1) {
+ RABlock* cont = block->hasConsecutive() ? successors.first() : nullptr;
+ ASMJIT_PROPAGATE(lra.allocJumpTable(inst, successors, cont));
+
+ node = next;
+ continue;
+ }
+ else {
+ // Otherwise this is an unconditional jump; special handling isn't required.
+ unconditionalJump = true;
+ }
+ }
+
+ ASMJIT_PROPAGATE(lra.allocInst(inst));
+ if (inst->type() == BaseNode::kNodeFuncCall)
+ ASMJIT_PROPAGATE(onEmitPreCall(inst->as<FuncCallNode>()));
+ else
+ ASMJIT_PROPAGATE(lra.spillAfterAllocation(inst));
+ }
+ node = next;
+ }
+
+ if (consecutive) {
+ BaseNode* prev = afterLast ? afterLast->prev() : cc()->lastNode();
+ cc()->_setCursor(unconditionalJump ? prev->prev() : prev);
+
+ if (consecutive->hasEntryAssignment()) {
+ ASMJIT_PROPAGATE(
+ lra.switchToAssignment(
+ consecutive->entryPhysToWorkMap(),
+ consecutive->entryWorkToPhysMap(),
+ consecutive->liveIn(),
+ consecutive->isAllocated(),
+ false));
+ }
+ else {
+ ASMJIT_PROPAGATE(lra.spillRegsBeforeEntry(consecutive));
+ ASMJIT_PROPAGATE(setBlockEntryAssignment(consecutive, block, lra._curAssignment));
+ lra._curAssignment.copyFrom(consecutive->entryPhysToWorkMap(), consecutive->entryWorkToPhysMap());
+ }
+ }
+
+ // Important - the local allocator can insert instructions before and after
+ // any instruction within the basic block, so refresh the block boundaries.
+ block->setFirst(beforeFirst->next());
+ block->setLast(afterLast ? afterLast->prev() : cc()->lastNode());
+
+ if (--blocksRemaining == 0)
+ break;
+
+ // Switch to the next consecutive block, if any.
+ if (consecutive) {
+ block = consecutive;
+ if (!block->isAllocated())
+ continue;
+ }
+
+ // Get the next block.
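+ // Only a block that is reachable, not yet allocated, and that already
+ // has an entry assignment (i.e. it is a known jump target) is eligible.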
+ for (;;) {
+ if (++blockId >= blockCount())
+ blockId = 0;
+
+ block = _blocks[blockId];
+ if (!block->isReachable() || block->isAllocated() || !block->hasEntryAssignment())
+ continue;
+
+ break;
+ }
+
+ // If we switched to some block we have to update the local allocator.
+ lra.replaceAssignment(block->entryPhysToWorkMap(), block->entryWorkToPhysMap());
+ }
+
+ _clobberedRegs.op<Support::Or>(lra._clobberedRegs);
+ return kErrorOk;
+}
+
+Error RAPass::setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept {
+ if (block->hasSharedAssignmentId()) {
+ uint32_t sharedAssignmentId = block->sharedAssignmentId();
+
+ // Shouldn't happen. The entry assignment of a block that has a shared state
+ // is assigned to all blocks with the same sharedAssignmentId. It's a bug if
+ // the shared state has already been assigned.
+ if (!_sharedAssignments[sharedAssignmentId].empty())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ return setSharedAssignment(sharedAssignmentId, fromAssignment);
+ }
+
+ PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
+ WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
+
+ if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ block->setEntryAssignment(physToWorkMap, workToPhysMap);
+
+ // If this is the first (entry) block there is nothing more to do.
+ if (block == fromBlock) {
+ // Entry block should never have a shared state.
+ if (block->hasSharedAssignmentId())
+ return DebugUtils::errored(kErrorInvalidState);
+
+ return kErrorOk;
+ }
+
+ RAAssignment as;
+ as.initLayout(_physRegCount, workRegs());
+ as.initMaps(physToWorkMap, workToPhysMap);
+
+ const ZoneBitVector& liveOut = fromBlock->liveOut();
+ const ZoneBitVector& liveIn = block->liveIn();
+
+ // It's possible that `fromBlock` has LIVE-OUT regs that `block` doesn't
+ // have in LIVE-IN; these have to be unassigned.
+ {
+ ZoneBitVector::ForEachBitOp<Support::AndNot> it(liveOut, liveIn);
+ while (it.hasNext()) {
+ uint32_t workId = uint32_t(it.next());
+ RAWorkReg* workReg = workRegById(workId);
+
+ uint32_t group = workReg->group();
+ uint32_t physId = as.workToPhysId(group, workId);
+
+ if (physId != RAAssignment::kPhysNone)
+ as.unassign(group, workId, physId);
+ }
+ }
+
+ return blockEntryAssigned(as);
+}
+
+Error RAPass::setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept {
+ ASMJIT_ASSERT(_sharedAssignments[sharedAssignmentId].empty());
+
+ PhysToWorkMap* physToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
+ WorkToPhysMap* workToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
+
+ if (ASMJIT_UNLIKELY(!physToWorkMap || !workToPhysMap))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _sharedAssignments[sharedAssignmentId].assignMaps(physToWorkMap, workToPhysMap);
+ ZoneBitVector& sharedLiveIn = _sharedAssignments[sharedAssignmentId]._liveIn;
+ ASMJIT_PROPAGATE(sharedLiveIn.resize(allocator(), workRegCount()));
+
+ RAAssignment as;
+ as.initLayout(_physRegCount, workRegs());
+
+ uint32_t sharedAssigned[BaseReg::kGroupVirt] {};
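+
+ // Accumulates, per register group, the registers assigned on entry to any
+ // block participating in this shared assignment; registers outside this
+ // union are unassigned from the shared map afterwards.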
+
+ for (RABlock* block : blocks()) {
+ if (block->sharedAssignmentId() == sharedAssignmentId) {
+ ASMJIT_ASSERT(!block->hasEntryAssignment());
+
+ PhysToWorkMap* entryPhysToWorkMap = clonePhysToWorkMap(fromAssignment.physToWorkMap());
+ WorkToPhysMap* entryWorkToPhysMap = cloneWorkToPhysMap(fromAssignment.workToPhysMap());
+
+ if (ASMJIT_UNLIKELY(!entryPhysToWorkMap || !entryWorkToPhysMap))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ block->setEntryAssignment(entryPhysToWorkMap, entryWorkToPhysMap);
+ as.initMaps(entryPhysToWorkMap, entryWorkToPhysMap);
+
+ const ZoneBitVector& liveIn = block->liveIn();
+ sharedLiveIn.or_(liveIn);
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ sharedAssigned[group] |= entryPhysToWorkMap->assigned[group];
+ Support::BitWordIterator<uint32_t> it(entryPhysToWorkMap->assigned[group]);
+
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ uint32_t workId = as.physToWorkId(group, physId);
+
+ if (!liveIn.bitAt(workId))
+ as.unassign(group, workId, physId);
+ }
+ }
+ }
+ }
+
+ {
+ as.initMaps(physToWorkMap, workToPhysMap);
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ Support::BitWordIterator<uint32_t> it(_availableRegs[group] & ~sharedAssigned[group]);
+
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ if (as.isPhysAssigned(group, physId)) {
+ uint32_t workId = as.physToWorkId(group, physId);
+ as.unassign(group, workId, physId);
+ }
+ }
+ }
+ }
+
+ return blockEntryAssigned(as);
+}
+
+Error RAPass::blockEntryAssigned(const RAAssignment& as) noexcept {
+ // A complex allocation strategy requires recording register assignments
+ // upon block entry (or per shared state).
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ if (!_strategy[group].isComplex())
+ continue;
+
+ Support::BitWordIterator<uint32_t> it(as.assigned(group));
+ while (it.hasNext()) {
+ uint32_t physId = it.next();
+ uint32_t workId = as.physToWorkId(group, physId);
+
+ RAWorkReg* workReg = workRegById(workId);
+ workReg->addAllocatedMask(Support::bitMask(physId));
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Utilities]
+// ============================================================================
+
+Error RAPass::useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept {
+ ASMJIT_ASSERT(alignment <= 64);
+
+ if (_temporaryMem.isNone()) {
+ ASMJIT_PROPAGATE(cc()->_newStack(_temporaryMem.as<BaseMem>(), size, alignment));
+ }
+ else {
+ ASMJIT_ASSERT(_temporaryMem.as<BaseMem>().isRegHome());
+
+ uint32_t virtId = _temporaryMem.as<BaseMem>().baseId();
+ VirtReg* virtReg = cc()->virtRegById(virtId);
+
+ cc()->setStackSize(virtId, Support::max(virtReg->virtSize(), size),
+ Support::max(virtReg->alignment(), alignment));
+ }
+
+ out = _temporaryMem.as<BaseMem>();
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Allocation - Prolog / Epilog]
+// ============================================================================
+
+Error RAPass::updateStackFrame() noexcept {
+ // Update some StackFrame information that we gathered during allocation. The
+ // only information we don't have at the moment is the final local stack size,
+ // which is calculated last.
+ FuncFrame& frame = func()->frame();
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ frame.addDirtyRegs(group, _clobberedRegs[group]);
+ frame.setLocalStackAlignment(_stackAllocator.alignment());
+
+ // If there are stack arguments that are not assigned to registers upon entry
+ // and the function doesn't require dynamic stack alignment we keep these
+ // arguments where they are. This will also mark all stack slots that match
+ // these arguments as allocated.
+ if (_numStackArgsToStackSlots)
+ ASMJIT_PROPAGATE(_markStackArgsToKeep());
+
+ // Calculate offsets of all stack slots and update StackSize to reflect the calculated local stack size.
+ ASMJIT_PROPAGATE(_stackAllocator.calculateStackFrame());
+ frame.setLocalStackSize(_stackAllocator.stackSize());
+
+ // Update the stack frame based on `_argsAssignment` and finalize it.
+ // Finalization means to apply final calculation to the stack layout.
+ ASMJIT_PROPAGATE(_argsAssignment.updateFuncFrame(frame));
+ ASMJIT_PROPAGATE(frame.finalize());
+
+ // StackAllocator allocates all slots starting from [0], so adjust them when necessary.
+ if (frame.localStackOffset() != 0)
+ ASMJIT_PROPAGATE(_stackAllocator.adjustSlotOffsets(int32_t(frame.localStackOffset())));
+
+ // Again, if there are stack arguments allocated in function's stack we have
+ // to handle them. This handles all cases (either regular or dynamic stack
+ // alignment).
+ if (_numStackArgsToStackSlots)
+ ASMJIT_PROPAGATE(_updateStackArgs());
+
+ return kErrorOk;
+}
+
+Error RAPass::_markStackArgsToKeep() noexcept {
+ FuncFrame& frame = func()->frame();
+ bool hasSAReg = frame.hasPreservedFP() || !frame.hasDynamicAlignment();
+
+ RAWorkRegs& workRegs = _workRegs;
+ uint32_t numWorkRegs = workRegCount();
+
+ for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
+ RAWorkReg* workReg = workRegs[workId];
+ if (workReg->hasFlag(RAWorkReg::kFlagStackArgToStack)) {
+ ASMJIT_ASSERT(workReg->hasArgIndex());
+ const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());
+
+ // If the register doesn't have a stack slot then we failed. This shouldn't
+ // happen as the register was marked `kFlagStackArgToStack`, which requires
+ // that the WorkReg was live-in upon function entry.
+ RAStackSlot* slot = workReg->stackSlot();
+ if (ASMJIT_UNLIKELY(!slot))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (hasSAReg && srcArg.isStack() && !srcArg.isIndirect()) {
+ uint32_t typeSize = Type::sizeOf(srcArg.typeId());
+ if (typeSize == slot->size()) {
+ slot->addFlags(RAStackSlot::kFlagStackArg);
+ continue;
+ }
+ }
+
+ // NOTE: Update the stack offset here so that when `_argsAssignment.updateFuncFrame()`
+ // is called it takes moving to stack slots into consideration. Without this
+ // we may miss some scratch registers later.
+ FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
+ dstArg.assignStackOffset(0);
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RAPass::_updateStackArgs() noexcept {
+ FuncFrame& frame = func()->frame();
+ RAWorkRegs& workRegs = _workRegs;
+ uint32_t numWorkRegs = workRegCount();
+
+ for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
+ RAWorkReg* workReg = workRegs[workId];
+ if (workReg->hasFlag(RAWorkReg::kFlagStackArgToStack)) {
+ ASMJIT_ASSERT(workReg->hasArgIndex());
+ RAStackSlot* slot = workReg->stackSlot();
+
+ if (ASMJIT_UNLIKELY(!slot))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (slot->isStackArg()) {
+ const FuncValue& srcArg = _func->detail().arg(workReg->argIndex());
+ if (frame.hasPreservedFP()) {
+ slot->setBaseRegId(_fp.id());
+ slot->setOffset(int32_t(frame.saOffsetFromSA()) + srcArg.stackOffset());
+ }
+ else {
+ slot->setOffset(int32_t(frame.saOffsetFromSP()) + srcArg.stackOffset());
+ }
+ }
+ else {
+ FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
+ dstArg.setStackOffset(slot->offset());
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+Error RAPass::insertPrologEpilog() noexcept {
+ FuncFrame& frame = _func->frame();
+
+ cc()->_setCursor(func());
+ ASMJIT_PROPAGATE(cc()->emitProlog(frame));
+ ASMJIT_PROPAGATE(cc()->emitArgsAssignment(frame, _argsAssignment));
+
+ cc()->_setCursor(func()->exitNode());
+ ASMJIT_PROPAGATE(cc()->emitEpilog(frame));
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Rewriter]
+// ============================================================================
+
+Error RAPass::rewrite() noexcept {
+#ifndef ASMJIT_NO_LOGGING
+ Logger* logger = debugLogger();
+ ASMJIT_RA_LOG_FORMAT("[RAPass::Rewrite]\n");
+#endif
+
+ return _rewrite(_func, _stop);
+}
+
+ASMJIT_FAVOR_SPEED Error RAPass::_rewrite(BaseNode* first, BaseNode* stop) noexcept {
+ uint32_t virtCount = cc()->_vRegArray.size();
+
+ BaseNode* node = first;
+ while (node != stop) {
+ BaseNode* next = node->next();
+ if (node->isInst()) {
+ InstNode* inst = node->as<InstNode>();
+ RAInst* raInst = node->passData<RAInst>();
+
+ Operand* operands = inst->operands();
+ uint32_t opCount = inst->opCount();
+ uint32_t i;
+
+ // Rewrite virtual registers into physical registers.
+ if (ASMJIT_LIKELY(raInst)) {
+ // If the instruction contains pass data (raInst) then it was a subject
+ // for register allocation and must be rewritten to use physical regs.
+ RATiedReg* tiedRegs = raInst->tiedRegs();
+ uint32_t tiedCount = raInst->tiedCount();
+
+ for (i = 0; i < tiedCount; i++) {
+ RATiedReg* tiedReg = &tiedRegs[i];
+
+ Support::BitWordIterator<uint32_t> useIt(tiedReg->useRewriteMask());
+ uint32_t useId = tiedReg->useId();
+ while (useIt.hasNext()) inst->rewriteIdAtIndex(useIt.next(), useId);
+
+ Support::BitWordIterator<uint32_t> outIt(tiedReg->outRewriteMask());
+ uint32_t outId = tiedReg->outId();
+ while (outIt.hasNext()) inst->rewriteIdAtIndex(outIt.next(), outId);
+ }
+
+ // This data is allocated by the `Zone` passed to `runOnFunction()`, which
+ // will be reset after the RA pass finishes. Reset it here to avoid keeping
+ // a dangling pointer after the RA pass is complete.
+ node->resetPassData();
+
+ if (ASMJIT_UNLIKELY(node->type() != BaseNode::kNodeInst)) {
+ // FuncRet terminates the flow; it must either be removed if the exit
+ // label immediately follows it (optimization) or patched to an
+ // architecture-dependent jump instruction that jumps to the function's
+ // exit before the epilog.
+ if (node->type() == BaseNode::kNodeFuncRet) {
+ RABlock* block = raInst->block();
+ if (!isNextTo(node, _func->exitNode())) {
+ cc()->_setCursor(node->prev());
+ ASMJIT_PROPAGATE(onEmitJump(_func->exitNode()->label()));
+ }
+
+ BaseNode* prev = node->prev();
+ cc()->removeNode(node);
+ block->setLast(prev);
+ }
+ }
+ }
+
+ // Rewrite stack slot addresses.
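+ // A memory operand with the "reg home" flag refers to the spill slot of
+ // a virtual register. It is rewritten into a concrete `[sp + offset]`
+ // (or `[fp + offset]` when the slot is frame-relative) form using the
+ // slot computed by the stack allocator.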
+ for (i = 0; i < opCount; i++) {
+ Operand& op = operands[i];
+ if (op.isMem()) {
+ BaseMem& mem = op.as<BaseMem>();
+ if (mem.isRegHome()) {
+ uint32_t virtIndex = Operand::virtIdToIndex(mem.baseId());
+ if (ASMJIT_UNLIKELY(virtIndex >= virtCount))
+ return DebugUtils::errored(kErrorInvalidVirtId);
+
+ VirtReg* virtReg = cc()->virtRegByIndex(virtIndex);
+ RAWorkReg* workReg = virtReg->workReg();
+ ASMJIT_ASSERT(workReg != nullptr);
+
+ RAStackSlot* slot = workReg->stackSlot();
+ int32_t offset = slot->offset();
+
+ mem._setBase(_sp.type(), slot->baseRegId());
+ mem.clearRegHome();
+ mem.addOffsetLo32(offset);
+ }
+ }
+ }
+ }
+
+ node = next;
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::RAPass - Logging]
+// ============================================================================
+
+#ifndef ASMJIT_NO_LOGGING
+static void RAPass_dumpRAInst(RAPass* pass, String& sb, const RAInst* raInst) noexcept {
+ const RATiedReg* tiedRegs = raInst->tiedRegs();
+ uint32_t tiedCount = raInst->tiedCount();
+
+ for (uint32_t i = 0; i < tiedCount; i++) {
+ const RATiedReg& tiedReg = tiedRegs[i];
+
+ if (i != 0) sb.appendChar(' ');
+
+ sb.appendFormat("%s{", pass->workRegById(tiedReg.workId())->name());
+ sb.appendChar(tiedReg.isReadWrite() ? 'X' :
+ tiedReg.isRead() ? 'R' :
+ tiedReg.isWrite() ? 'W' : '?');
+
+ if (tiedReg.hasUseId())
+ sb.appendFormat("|Use=%u", tiedReg.useId());
+ else if (tiedReg.isUse())
+ sb.appendString("|Use");
+
+ if (tiedReg.hasOutId())
+ sb.appendFormat("|Out=%u", tiedReg.outId());
+ else if (tiedReg.isOut())
+ sb.appendString("|Out");
+
+ if (tiedReg.isLast()) sb.appendString("|Last");
+ if (tiedReg.isKill()) sb.appendString("|Kill");
+
+ sb.appendString("}");
+ }
+}
+
+ASMJIT_FAVOR_SIZE Error RAPass::annotateCode() noexcept {
+ uint32_t loggerFlags = _loggerFlags;
+ StringTmp<1024> sb;
+
+ for (const RABlock* block : _blocks) {
+ BaseNode* node = block->first();
+ if (!node) continue;
+
+ BaseNode* last = block->last();
+ for (;;) {
+ sb.clear();
+ Logging::formatNode(sb, loggerFlags, cc(), node);
+
+ if ((loggerFlags & FormatOptions::kFlagDebugRA) != 0 && node->isInst() && node->hasPassData()) {
+ const RAInst* raInst = node->passData<RAInst>();
+ if (raInst->tiedCount() > 0) {
+ sb.padEnd(40);
+ sb.appendString(" | ");
+ RAPass_dumpRAInst(this, sb, raInst);
+ }
+ }
+
+ node->setInlineComment(
+ static_cast<char*>(
+ cc()->_dataZone.dup(sb.data(), sb.size(), true)));
+
+ if (node == last)
+ break;
+ node = node->next();
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error RAPass::_dumpBlockIds(String& sb, const RABlocks& blocks) noexcept {
+ for (uint32_t i = 0, size = blocks.size(); i < size; i++) {
+ const RABlock* block = blocks[i];
+ if (i != 0)
+ ASMJIT_PROPAGATE(sb.appendFormat(", #%u", block->blockId()));
+ else
+ ASMJIT_PROPAGATE(sb.appendFormat("#%u", block->blockId()));
+ }
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error RAPass::_dumpBlockLiveness(String& sb, const RABlock* block) noexcept {
+ for (uint32_t liveType = 0; liveType < RABlock::kLiveCount; liveType++) {
+ const char* bitsName = liveType == RABlock::kLiveIn ? "IN " :
+ liveType == RABlock::kLiveOut ? "OUT " :
+ liveType == RABlock::kLiveGen ? "GEN " : "KILL";
+
+ const ZoneBitVector& bits = block->_liveBits[liveType];
+ uint32_t size = bits.size();
+ ASMJIT_ASSERT(size <= workRegCount());
+
+ uint32_t n = 0;
+ for (uint32_t workId = 0; workId < size; workId++) {
+ if (bits.bitAt(workId)) {
+ RAWorkReg* wReg = workRegById(workId);
+
+ if (!n)
+ sb.appendFormat(" %s [", bitsName);
+ else
+ sb.appendString(", ");
+
+ sb.appendString(wReg->name());
+ n++;
+ }
+ }
+
+ if (n)
+ sb.appendString("]\n");
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error RAPass::_dumpLiveSpans(String& sb) noexcept {
+ uint32_t numWorkRegs = _workRegs.size();
+ uint32_t maxSize = _maxWorkRegNameSize;
+
+ for (uint32_t workId = 0; workId < numWorkRegs; workId++) {
+ RAWorkReg* workReg = _workRegs[workId];
+
+ sb.appendString(" ");
+
+ size_t oldSize = sb.size();
+ sb.appendString(workReg->name());
+ sb.padEnd(oldSize + maxSize);
+
+ RALiveStats& stats = workReg->liveStats();
+ sb.appendFormat(" {id:%04u width: %-4u freq: %0.4f priority=%0.4f}",
+ workReg->virtId(),
+ stats.width(),
+ stats.freq(),
+ stats.priority());
+ sb.appendString(": ");
+
+ LiveRegSpans& liveSpans = workReg->liveSpans();
+ for (uint32_t x = 0; x < liveSpans.size(); x++) {
+ const LiveRegSpan& liveSpan = liveSpans[x];
+ if (x) sb.appendString(", ");
+ sb.appendFormat("[%u:%u]", liveSpan.a, liveSpan.b);
+ }
+
+ sb.appendChar('\n');
+ }
+
+ return kErrorOk;
+}
+#endif
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/core/rapass_p.h b/3rdparty/asmjit/src/asmjit/core/rapass_p.h
new file mode 100644
index 00000000000..5a575ad4bae
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/rapass_p.h
@@ -0,0 +1,1189 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RAPASS_P_H_INCLUDED
+#define ASMJIT_CORE_RAPASS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/raassignment_p.h"
+#include "../core/radefs_p.h"
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RABlock]
+// ============================================================================
+
+class RABlock {
+public:
+ ASMJIT_NONCOPYABLE(RABlock)
+
+ typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+ typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+ enum Id : uint32_t {
+ kUnassignedId = 0xFFFFFFFFu
+ };
+
+ enum Flags : uint32_t {
+ //! Block has been constructed from nodes.
+ kFlagIsConstructed = 0x00000001u,
+ //! Block is reachable (set by `buildViews()`).
+ kFlagIsReachable = 0x00000002u,
+ //! Block has been allocated.
+ kFlagIsAllocated = 0x00000004u,
+ //! Block is a function-exit.
+ kFlagIsFuncExit = 0x00000008u,
+
+ //! Block has a terminator (jump, conditional jump, ret).
+ kFlagHasTerminator = 0x00000010u,
+ //! Block naturally flows to the next block.
+ kFlagHasConsecutive = 0x00000020u,
+ //! Block contains fixed registers (precolored).
+ kFlagHasFixedRegs = 0x00000040u,
+ //! Block contains function calls.
+ kFlagHasFuncCalls = 0x00000080u
+ };
+
+ //! Register allocator pass.
+ RAPass* _ra;
+
+ //! Block id (indexed from zero).
+ uint32_t _blockId;
+ //! Block flags, see `Flags`.
+ uint32_t _flags;
+
+ //! First `BaseNode` of this block (inclusive).
+ BaseNode* _first;
+ //! Last `BaseNode` of this block (inclusive).
+ BaseNode* _last;
+
+ //! Initial position of this block (inclusive).
+ uint32_t _firstPosition;
+ //! End position of this block (exclusive).
+ uint32_t _endPosition;
+
+ //! Weight of this block (default 0, each loop adds one).
+ uint32_t _weight;
+ //! Position of this block in the post-order view, assigned during POV construction.
+ uint32_t _povOrder;
+
+ //! Basic statistics about registers.
+ RARegsStats _regsStats;
+ //! Maximum live-count per register group.
+ RALiveCount _maxLiveCount;
+
+ //! Timestamp (used by block visitors).
+ mutable uint64_t _timestamp;
+ //! Immediate dominator of this block.
+ RABlock* _idom;
+
+ //! Block predecessors.
+ RABlocks _predecessors;
+ //! Block successors.
+ RABlocks _successors;
+
+ // TODO: Used?
+ RABlocks _doms;
+
+ enum LiveType : uint32_t {
+ kLiveIn = 0,
+ kLiveOut = 1,
+ kLiveGen = 2,
+ kLiveKill = 3,
+ kLiveCount = 4
+ };
+
+ //! Liveness in/out/gen/kill.
+ ZoneBitVector _liveBits[kLiveCount];
+
+ //! Shared assignment id, or `Globals::kInvalidId` if this block doesn't
+ //! have a shared assignment. See `RASharedAssignment` for more details.
+ uint32_t _sharedAssignmentId;
+ //! Scratch registers that cannot be allocated upon block entry.
+ uint32_t _entryScratchGpRegs;
+ //! Scratch registers used at exit, by a terminator instruction.
+ uint32_t _exitScratchGpRegs;
+
+ //! Register assignment (PhysToWork) on entry.
+ PhysToWorkMap* _entryPhysToWorkMap;
+ //! Register assignment (WorkToPhys) on entry.
+ WorkToPhysMap* _entryWorkToPhysMap;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RABlock(RAPass* ra) noexcept
+ : _ra(ra),
+ _blockId(kUnassignedId),
+ _flags(0),
+ _first(nullptr),
+ _last(nullptr),
+ _firstPosition(0),
+ _endPosition(0),
+ _weight(0),
+ _povOrder(kUnassignedId),
+ _regsStats(),
+ _maxLiveCount(),
+ _timestamp(0),
+ _idom(nullptr),
+ _predecessors(),
+ _successors(),
+ _doms(),
+ _sharedAssignmentId(Globals::kInvalidId),
+ _entryScratchGpRegs(0),
+ _exitScratchGpRegs(0),
+ _entryPhysToWorkMap(nullptr),
+ _entryWorkToPhysMap(nullptr) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline RAPass* pass() const noexcept { return _ra; }
+ inline ZoneAllocator* allocator() const noexcept;
+
+ inline uint32_t blockId() const noexcept { return _blockId; }
+ inline uint32_t flags() const noexcept { return _flags; }
+
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+
+ inline bool isAssigned() const noexcept { return _blockId != kUnassignedId; }
+
+ inline bool isConstructed() const noexcept { return hasFlag(kFlagIsConstructed); }
+ inline bool isReachable() const noexcept { return hasFlag(kFlagIsReachable); }
+ inline bool isAllocated() const noexcept { return hasFlag(kFlagIsAllocated); }
+ inline bool isFuncExit() const noexcept { return hasFlag(kFlagIsFuncExit); }
+
+ inline void makeConstructed(const RARegsStats& regStats) noexcept {
+ _flags |= kFlagIsConstructed;
+ _regsStats.combineWith(regStats);
+ }
+
+ inline void makeReachable() noexcept { _flags |= kFlagIsReachable; }
+ inline void makeAllocated() noexcept { _flags |= kFlagIsAllocated; }
+
+ inline const RARegsStats& regsStats() const noexcept { return _regsStats; }
+
+ inline bool hasTerminator() const noexcept { return hasFlag(kFlagHasTerminator); }
+ inline bool hasConsecutive() const noexcept { return hasFlag(kFlagHasConsecutive); }
+
+ inline bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
+ inline bool hasSuccessors() const noexcept { return !_successors.empty(); }
+
+ inline const RABlocks& predecessors() const noexcept { return _predecessors; }
+ inline const RABlocks& successors() const noexcept { return _successors; }
+
+ inline BaseNode* first() const noexcept { return _first; }
+ inline BaseNode* last() const noexcept { return _last; }
+
+ inline void setFirst(BaseNode* node) noexcept { _first = node; }
+ inline void setLast(BaseNode* node) noexcept { _last = node; }
+
+ inline uint32_t firstPosition() const noexcept { return _firstPosition; }
+ inline void setFirstPosition(uint32_t position) noexcept { _firstPosition = position; }
+
+ inline uint32_t endPosition() const noexcept { return _endPosition; }
+ inline void setEndPosition(uint32_t position) noexcept { _endPosition = position; }
+
+ inline uint32_t povOrder() const noexcept { return _povOrder; }
+
+ inline uint32_t entryScratchGpRegs() const noexcept;
+ inline uint32_t exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
+
+ inline void addExitScratchGpRegs(uint32_t regMask) noexcept { _exitScratchGpRegs |= regMask; }
+
+ inline bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
+ inline uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; }
+ inline void setSharedAssignmentId(uint32_t id) noexcept { _sharedAssignmentId = id; }
+
+ inline uint64_t timestamp() const noexcept { return _timestamp; }
+ inline bool hasTimestamp(uint64_t ts) const noexcept { return _timestamp == ts; }
+ inline void setTimestamp(uint64_t ts) const noexcept { _timestamp = ts; }
+ inline void resetTimestamp() const noexcept { _timestamp = 0; }
+
+ inline RABlock* consecutive() const noexcept { return hasConsecutive() ? _successors[0] : nullptr; }
+
+ inline RABlock* iDom() noexcept { return _idom; }
+ inline const RABlock* iDom() const noexcept { return _idom; }
+ inline void setIDom(RABlock* block) noexcept { _idom = block; }
+
+ inline ZoneBitVector& liveIn() noexcept { return _liveBits[kLiveIn]; }
+ inline const ZoneBitVector& liveIn() const noexcept { return _liveBits[kLiveIn]; }
+
+ inline ZoneBitVector& liveOut() noexcept { return _liveBits[kLiveOut]; }
+ inline const ZoneBitVector& liveOut() const noexcept { return _liveBits[kLiveOut]; }
+
+ inline ZoneBitVector& gen() noexcept { return _liveBits[kLiveGen]; }
+ inline const ZoneBitVector& gen() const noexcept { return _liveBits[kLiveGen]; }
+
+ inline ZoneBitVector& kill() noexcept { return _liveBits[kLiveKill]; }
+ inline const ZoneBitVector& kill() const noexcept { return _liveBits[kLiveKill]; }
+
+ inline Error resizeLiveBits(uint32_t size) noexcept {
+ ASMJIT_PROPAGATE(_liveBits[kLiveIn ].resize(allocator(), size));
+ ASMJIT_PROPAGATE(_liveBits[kLiveOut ].resize(allocator(), size));
+ ASMJIT_PROPAGATE(_liveBits[kLiveGen ].resize(allocator(), size));
+ ASMJIT_PROPAGATE(_liveBits[kLiveKill].resize(allocator(), size));
+ return kErrorOk;
+ }
+
+ inline bool hasEntryAssignment() const noexcept { return _entryPhysToWorkMap != nullptr; }
+ inline WorkToPhysMap* entryWorkToPhysMap() const noexcept { return _entryWorkToPhysMap; }
+ inline PhysToWorkMap* entryPhysToWorkMap() const noexcept { return _entryPhysToWorkMap; }
+
+ inline void setEntryAssignment(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
+ _entryPhysToWorkMap = physToWorkMap;
+ _entryWorkToPhysMap = workToPhysMap;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Adds a successor to this block and adds this block as a predecessor of
+ //! `successor`, connecting both sides.
+ //!
+ //! This API must be used to manage successors and predecessors; never manage
+ //! them manually.
+ Error appendSuccessor(RABlock* successor) noexcept;
+
+ //! Similar to `appendSuccessor()`, but prepends instead of appending.
+ //!
+ //! This function is used to add a natural flow (always first) to the block.
+ Error prependSuccessor(RABlock* successor) noexcept;
+
+ //! \}
+};
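+
+// Illustrative sketch (assuming an existing pass and nodes; not part of this
+// header) of how edges are expected to be created:
+//
+//   RABlock* block = pass->newBlock(entryNode);
+//   RABlock* taken = pass->newBlockOrExistingAt(labelNode);
+//
+//   // Connects both sides - `taken` also gains `block` as a predecessor.
+//   ASMJIT_PROPAGATE(block->appendSuccessor(taken));
+//   // Natural flow is prepended, so it's always `successors()[0]`.
+//   ASMJIT_PROPAGATE(block->prependSuccessor(fallthrough));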
+
+// ============================================================================
+// [asmjit::RAInst]
+// ============================================================================
+
+//! Register allocator's data associated with each `InstNode`.
+class RAInst {
+public:
+ ASMJIT_NONCOPYABLE(RAInst)
+
+ //! Parent block.
+ RABlock* _block;
+ //! Instruction flags.
+ uint32_t _flags;
+ //! Total count of RATiedReg's.
+ uint32_t _tiedTotal;
+ //! Index of RATiedReg's per register group.
+ RARegIndex _tiedIndex;
+ //! Count of RATiedReg's per register group.
+ RARegCount _tiedCount;
+ //! Number of live (and thus interfering) VirtReg's at this point.
+ RALiveCount _liveCount;
+ //! Fixed physical registers used.
+ RARegMask _usedRegs;
+ //! Clobbered registers (by a function call).
+ RARegMask _clobberedRegs;
+ //! Tied registers.
+ RATiedReg _tiedRegs[1];
+
+ enum Flags : uint32_t {
+ kFlagIsTerminator = 0x00000001u
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_INLINE RAInst(RABlock* block, uint32_t flags, uint32_t tiedTotal, const RARegMask& clobberedRegs) noexcept {
+ _block = block;
+ _flags = flags;
+ _tiedTotal = tiedTotal;
+ _tiedIndex.reset();
+ _tiedCount.reset();
+ _liveCount.reset();
+ _usedRegs.reset();
+ _clobberedRegs = clobberedRegs;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the instruction flags.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the instruction has flag `flag`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+ //! Replaces the existing instruction flags with `flags`.
+ inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
+ //! Adds instruction `flags` to this RAInst.
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ //! Clears instruction `flags` from this RAInst.
+ inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
+
+ //! Returns whether the RAInst represents an instruction that terminates this basic block.
+ inline bool isTerminator() const noexcept { return hasFlag(kFlagIsTerminator); }
+
+ //! Returns the associated block with this RAInst.
+ inline RABlock* block() const noexcept { return _block; }
+
+ //! Returns tied registers (all).
+ inline RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); }
+ //! Returns tied registers for a given `group`.
+ inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
+
+ //! Returns count of all tied registers.
+ inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
+ //! Returns count of tied registers of a given `group`.
+ inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount[group]; }
+
+ //! Returns `RATiedReg` at the given `index`.
+ inline RATiedReg* tiedAt(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < _tiedTotal);
+ return tiedRegs() + index;
+ }
+
+ //! Returns `RATiedReg` at the given `index` of the given register `group`.
+ inline RATiedReg* tiedOf(uint32_t group, uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < _tiedCount._regs[group]);
+ return tiedRegs(group) + index;
+ }
+
+ inline void setTiedAt(uint32_t index, RATiedReg& tied) noexcept {
+ ASMJIT_ASSERT(index < _tiedTotal);
+ _tiedRegs[index] = tied;
+ }
+
+ //! \}
+
+ //! \name Static Functions
+ //! \{
+
+ static inline size_t sizeOf(uint32_t tiedRegCount) noexcept {
+ return sizeof(RAInst) - sizeof(RATiedReg) + tiedRegCount * sizeof(RATiedReg);
+ }
+
+ //! \}
+};
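+
+// Note (illustrative): `RAInst` is allocated with its `RATiedReg` array
+// inlined - `_tiedRegs[1]` is the first element of a variable-length array -
+// so `RAInst::sizeOf(3)` equals `sizeof(RAInst) + 2 * sizeof(RATiedReg)`.
+// See `RAPass::newRAInst()` for the matching zone allocation.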
+
+// ============================================================================
+// [asmjit::RAInstBuilder]
+// ============================================================================
+
+//! A helper class that is used to build an array of RATiedReg items that are
+//! then copied to `RAInst`.
+class RAInstBuilder {
+public:
+ ASMJIT_NONCOPYABLE(RAInstBuilder)
+
+ //! Flags combined from all RATiedReg's.
+ uint32_t _aggregatedFlags;
+ //! Flags that will be cleared before storing the aggregated flags to `RAInst`.
+ uint32_t _forbiddenFlags;
+ RARegCount _count;
+ RARegsStats _stats;
+
+ RARegMask _used;
+ RARegMask _clobbered;
+
+ //! Current tied register in `_tiedRegs`.
+ RATiedReg* _cur;
+ //! Array of temporary tied registers.
+ RATiedReg _tiedRegs[128];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline RAInstBuilder() noexcept { reset(); }
+
+ inline void init() noexcept { reset(); }
+ inline void reset() noexcept {
+ _aggregatedFlags = 0;
+ _forbiddenFlags = 0;
+ _count.reset();
+ _stats.reset();
+ _used.reset();
+ _clobbered.reset();
+ _cur = _tiedRegs;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t aggregatedFlags() const noexcept { return _aggregatedFlags; }
+ inline uint32_t forbiddenFlags() const noexcept { return _forbiddenFlags; }
+
+ inline void addAggregatedFlags(uint32_t flags) noexcept { _aggregatedFlags |= flags; }
+ inline void addForbiddenFlags(uint32_t flags) noexcept { _forbiddenFlags |= flags; }
+
+ //! Returns the number of tied registers added to the builder.
+ inline uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); }
+
+ inline RATiedReg* begin() noexcept { return _tiedRegs; }
+ inline RATiedReg* end() noexcept { return _cur; }
+
+ inline const RATiedReg* begin() const noexcept { return _tiedRegs; }
+ inline const RATiedReg* end() const noexcept { return _cur; }
+
+ //! Returns `RATiedReg` at the given `index`.
+ inline RATiedReg* operator[](uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < tiedRegCount());
+ return &_tiedRegs[index];
+ }
+
+ //! Returns `RATiedReg` at the given `index` (const).
+ inline const RATiedReg* operator[](uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < tiedRegCount());
+ return &_tiedRegs[index];
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ ASMJIT_INLINE Error add(RAWorkReg* workReg, uint32_t flags, uint32_t allocable, uint32_t useId, uint32_t useRewriteMask, uint32_t outId, uint32_t outRewriteMask, uint32_t rmSize = 0) noexcept {
+ uint32_t group = workReg->group();
+ RATiedReg* tiedReg = workReg->tiedReg();
+
+ if (useId != BaseReg::kIdBad) {
+ _stats.makeFixed(group);
+ _used[group] |= Support::bitMask(useId);
+ flags |= RATiedReg::kUseFixed;
+ }
+
+ if (outId != BaseReg::kIdBad) {
+ _clobbered[group] |= Support::bitMask(outId);
+ flags |= RATiedReg::kOutFixed;
+ }
+
+ _aggregatedFlags |= flags;
+ _stats.makeUsed(group);
+
+ if (!tiedReg) {
+ // Could happen when the builder is not reset properly after each instruction.
+ ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+ tiedReg = _cur++;
+ tiedReg->init(workReg->workId(), flags, allocable, useId, useRewriteMask, outId, outRewriteMask, rmSize);
+ workReg->setTiedReg(tiedReg);
+
+ _count.add(group);
+ return kErrorOk;
+ }
+ else {
+ if (useId != BaseReg::kIdBad) {
+ if (ASMJIT_UNLIKELY(tiedReg->hasUseId()))
+ return DebugUtils::errored(kErrorOverlappedRegs);
+ tiedReg->setUseId(useId);
+ }
+
+ if (outId != BaseReg::kIdBad) {
+ if (ASMJIT_UNLIKELY(tiedReg->hasOutId()))
+ return DebugUtils::errored(kErrorOverlappedRegs);
+ tiedReg->setOutId(outId);
+ // TODO: ? _used[group] |= Support::bitMask(outId);
+ }
+
+ tiedReg->addRefCount();
+ tiedReg->addFlags(flags);
+ tiedReg->_allocableRegs &= allocable;
+ tiedReg->_useRewriteMask |= useRewriteMask;
+ tiedReg->_outRewriteMask |= outRewriteMask;
+ tiedReg->_rmSize = uint8_t(Support::max<uint32_t>(tiedReg->rmSize(), rmSize));
+ return kErrorOk;
+ }
+ }
+
+ ASMJIT_INLINE Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept {
+ ASMJIT_ASSERT(useId != BaseReg::kIdBad);
+
+ uint32_t flags = RATiedReg::kUse | RATiedReg::kRead | RATiedReg::kUseFixed;
+ uint32_t group = workReg->group();
+ uint32_t allocable = Support::bitMask(useId);
+
+ _aggregatedFlags |= flags;
+ _used[group] |= allocable;
+ _stats.makeFixed(group);
+ _stats.makeUsed(group);
+
+ RATiedReg* tiedReg = workReg->tiedReg();
+ if (!tiedReg) {
+ // Could happen when the builder is not reset properly after each instruction.
+ ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+ tiedReg = _cur++;
+ tiedReg->init(workReg->workId(), flags, allocable, useId, 0, BaseReg::kIdBad, 0);
+ workReg->setTiedReg(tiedReg);
+
+ _count.add(group);
+ return kErrorOk;
+ }
+ else {
+ if (tiedReg->hasUseId()) {
+ flags |= RATiedReg::kDuplicate;
+ tiedReg->_allocableRegs |= allocable;
+ }
+ else {
+ tiedReg->setUseId(useId);
+ tiedReg->_allocableRegs &= allocable;
+ }
+
+ tiedReg->addRefCount();
+ tiedReg->addFlags(flags);
+ return kErrorOk;
+ }
+ }
+
+ ASMJIT_INLINE Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept {
+ ASMJIT_ASSERT(outId != BaseReg::kIdBad);
+
+ uint32_t flags = RATiedReg::kOut | RATiedReg::kWrite | RATiedReg::kOutFixed;
+ uint32_t group = workReg->group();
+ uint32_t allocable = Support::bitMask(outId);
+
+ _aggregatedFlags |= flags;
+ _used[group] |= allocable;
+ _stats.makeFixed(group);
+ _stats.makeUsed(group);
+
+ RATiedReg* tiedReg = workReg->tiedReg();
+ if (!tiedReg) {
+ // Could happen when the builder is not reset properly after each instruction.
+ ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
+
+ tiedReg = _cur++;
+ tiedReg->init(workReg->workId(), flags, allocable, BaseReg::kIdBad, 0, outId, 0);
+ workReg->setTiedReg(tiedReg);
+
+ _count.add(group);
+ return kErrorOk;
+ }
+ else {
+ if (tiedReg->hasOutId())
+ return DebugUtils::errored(kErrorOverlappedRegs);
+
+ tiedReg->addRefCount();
+ tiedReg->addFlags(flags);
+ tiedReg->setOutId(outId);
+ return kErrorOk;
+ }
+ }
+
+ //! \}
+};
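+
+// Illustrative per-instruction lifecycle (`ib`, `workReg`, the ids/masks, and
+// the enclosing pass are assumed):
+//
+//   RAInstBuilder ib;
+//   ib.reset();                       // Must be reset for every instruction.
+//   ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead,
+//                           allocableRegs, useId, useRewriteMask,
+//                           BaseReg::kIdBad, 0));
+//   ASMJIT_PROPAGATE(pass->assignRAInst(node, block, ib));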
+
+// ============================================================================
+// [asmjit::RASharedAssignment]
+// ============================================================================
+
+class RASharedAssignment {
+public:
+ typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+ typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+ //! Bit-mask of registers that cannot be used upon a block entry, for each
+ //! block that has this shared assignment. Scratch registers can come from
+ //! ISA limits (like `jecxz`/`loop` instructions on x86) or from jump/branch
+ //! instructions that use registers to perform an indirect jump.
+ uint32_t _entryScratchGpRegs;
+ //! Union of all live-in registers.
+ ZoneBitVector _liveIn;
+ //! Register assignment (PhysToWork).
+ PhysToWorkMap* _physToWorkMap;
+ //! Register assignment (WorkToPhys).
+ WorkToPhysMap* _workToPhysMap;
+
+ //! Provided for clarity; most likely never called as the vector of shared
+ //! assignments is zero-initialized.
+ inline RASharedAssignment() noexcept
+ : _entryScratchGpRegs(0),
+ _liveIn(),
+ _physToWorkMap(nullptr),
+ _workToPhysMap(nullptr) {}
+
+ inline uint32_t entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
+ inline void addScratchGpRegs(uint32_t mask) noexcept { _entryScratchGpRegs |= mask; }
+
+ inline const ZoneBitVector& liveIn() const noexcept { return _liveIn; }
+
+ inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
+ inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
+
+ inline bool empty() const noexcept {
+ return _physToWorkMap == nullptr;
+ }
+
+ inline void assignMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
+ _physToWorkMap = physToWorkMap;
+ _workToPhysMap = workToPhysMap;
+ }
+};
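+
+// Illustrative note: blocks entered through the same indirect jump (a jump
+// table, for example) are assigned one id via
+// `RABlock::setSharedAssignmentId()`, so all of them share the single entry
+// assignment and scratch mask stored here instead of per-block entry state.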
+
+// ============================================================================
+// [asmjit::RAPass]
+// ============================================================================
+
+//! Register allocation pass used by `BaseCompiler`.
+class RAPass : public FuncPass {
+public:
+ ASMJIT_NONCOPYABLE(RAPass)
+ typedef FuncPass Base;
+
+ enum Weights : uint32_t {
+ kCallArgWeight = 80
+ };
+
+ typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
+ typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
+
+ //! Allocator that uses zone passed to `runOnFunction()`.
+ ZoneAllocator _allocator;
+ //! Logger, disabled if null.
+ Logger* _logger;
+ //! Debug logger, non-null only if `kOptionDebugPasses` option is set.
+ Logger* _debugLogger;
+ //! Logger flags.
+ uint32_t _loggerFlags;
+
+ //! Function being processed.
+ FuncNode* _func;
+ //! Stop node.
+ BaseNode* _stop;
+ //! Node that is used to insert extra code after the function body.
+ BaseNode* _extraBlock;
+
+ //! Blocks (first block is the entry, always exists).
+ RABlocks _blocks;
+ //! Function exit blocks (usually one, but can contain more).
+ RABlocks _exits;
+ //! Post order view (POV).
+ RABlocks _pov;
+
+ //! Number of instruction nodes.
+ uint32_t _instructionCount;
+ //! Number of created blocks (internal).
+ uint32_t _createdBlockCount;
+
+ //! Shared assignments, each potentially used by multiple blocks (see `RASharedAssignment`).
+ ZoneVector<RASharedAssignment> _sharedAssignments;
+
+ //! Timestamp generator (incremental).
+ mutable uint64_t _lastTimestamp;
+
+ //! Architecture registers information.
+ const ArchRegs* _archRegsInfo;
+ //! Architecture traits.
+ RAArchTraits _archTraits;
+ //! Index to physical registers in `RAAssignment::PhysToWorkMap`.
+ RARegIndex _physRegIndex;
+ //! Count of physical registers in `RAAssignment::PhysToWorkMap`.
+ RARegCount _physRegCount;
+ //! Total number of physical registers.
+ uint32_t _physRegTotal;
+ //! Indexes of possible scratch registers that can be selected if necessary.
+ uint8_t _scratchRegIndexes[2];
+
+ //! Registers available for allocation.
+ RARegMask _availableRegs;
+ //! Count of physical registers per group.
+ RARegCount _availableRegCount;
+ //! Registers clobbered by the function.
+ RARegMask _clobberedRegs;
+
+ //! Work registers (registers used by the function).
+ RAWorkRegs _workRegs;
+ //! Work registers per register group.
+ RAWorkRegs _workRegsOfGroup[BaseReg::kGroupVirt];
+
+ //! Register allocation strategy per register group.
+ RAStrategy _strategy[BaseReg::kGroupVirt];
+ //! Global max live-count (from all blocks) per register group.
+ RALiveCount _globalMaxLiveCount;
+ //! Global live spans per register group.
+ LiveRegSpans* _globalLiveSpans[BaseReg::kGroupVirt];
+ //! Temporary stack slot.
+ Operand _temporaryMem;
+
+ //! Stack pointer.
+ BaseReg _sp;
+ //! Frame pointer.
+ BaseReg _fp;
+ //! Stack manager.
+ RAStackAllocator _stackAllocator;
+ //! Function arguments assignment.
+ FuncArgsAssignment _argsAssignment;
+ //! Count of stack arguments that have to be assigned to stack slots.
+ uint32_t _numStackArgsToStackSlots;
+
+ //! Maximum name-size computed from all WorkRegs.
+ uint32_t _maxWorkRegNameSize;
+ //! Temporary string builder used to format comments.
+ StringTmp<80> _tmpString;
+
+ //! \name Construction & Reset
+ //! \{
+
+ RAPass() noexcept;
+ virtual ~RAPass() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns `Logger` passed to `runOnFunction()`.
+ inline Logger* logger() const noexcept { return _logger; }
+ //! Returns `Logger` passed to `runOnFunction()` or null if `kOptionDebugPasses` is not set.
+ inline Logger* debugLogger() const noexcept { return _debugLogger; }
+
+ //! Returns `Zone` passed to `runOnFunction()`.
+ inline Zone* zone() const noexcept { return _allocator.zone(); }
+ //! Returns `ZoneAllocator` used by the register allocator.
+ inline ZoneAllocator* allocator() const noexcept { return const_cast<ZoneAllocator*>(&_allocator); }
+
+ inline const ZoneVector<RASharedAssignment>& sharedAssignments() const noexcept { return _sharedAssignments; }
+ inline uint32_t sharedAssignmentCount() const noexcept { return _sharedAssignments.size(); }
+
+ //! Returns the current function node.
+ inline FuncNode* func() const noexcept { return _func; }
+ //! Returns the stop of the current function.
+ inline BaseNode* stop() const noexcept { return _stop; }
+
+ //! Returns an extra block used by the current function being processed.
+ inline BaseNode* extraBlock() const noexcept { return _extraBlock; }
+ //! Sets an extra block, see `extraBlock()`.
+ inline void setExtraBlock(BaseNode* node) noexcept { _extraBlock = node; }
+
+ //! Returns the end position of the function (each instruction occupies two positions).
+ inline uint32_t endPosition() const noexcept { return _instructionCount * 2; }
+
+ inline const RARegMask& availableRegs() const noexcept { return _availableRegs; }
+ inline const RARegMask& clobberedRegs() const noexcept { return _clobberedRegs; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void makeUnavailable(uint32_t group, uint32_t regId) noexcept {
+ _availableRegs[group] &= ~Support::bitMask(regId);
+ _availableRegCount[group]--;
+ }
+
+ //! Runs the register allocator for the given `func`.
+ Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) noexcept override;
+
+ //! Performs all allocation steps sequentially, called by `runOnFunction()`.
+ Error onPerformAllSteps() noexcept;
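+
+ // Illustrative overview of the steps (the authoritative order lives in the
+ // implementation): build CFG -> build views (POV) -> remove unreachable
+ // blocks -> build dominators -> build liveness -> global allocation ->
+ // local allocation -> update stack frame -> insert prolog/epilog -> rewrite
+ // virtual registers to physical ones.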
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ //! Called by `runOnFunction()` before the register allocation to initialize
+ //! architecture-specific data and constraints.
+ virtual void onInit() noexcept = 0;
+
+ //! Called by `runOnFunction()` after register allocation to clean everything
+ //! up. Called even if the register allocation failed.
+ virtual void onDone() noexcept = 0;
+
+ //! \}
+
+ //! \name CFG - Basic-Block Management
+ //! \{
+
+ //! Returns the function's entry block.
+ inline RABlock* entryBlock() noexcept {
+ ASMJIT_ASSERT(!_blocks.empty());
+ return _blocks[0];
+ }
+
+ //! \overload
+ inline const RABlock* entryBlock() const noexcept {
+ ASMJIT_ASSERT(!_blocks.empty());
+ return _blocks[0];
+ }
+
+ //! Returns all basic blocks of this function.
+ inline RABlocks& blocks() noexcept { return _blocks; }
+ //! \overload
+ inline const RABlocks& blocks() const noexcept { return _blocks; }
+
+ //! Returns the count of basic blocks (returns size of `_blocks` array).
+ inline uint32_t blockCount() const noexcept { return _blocks.size(); }
+ //! Returns the count of reachable basic blocks (returns size of `_pov` array).
+ inline uint32_t reachableBlockCount() const noexcept { return _pov.size(); }
+
+ //! Tests whether the CFG has dangling blocks - blocks that were created by
+ //! `newBlock()`, but never added to the CFG through `addBlock()`. If `true`
+ //! is returned after the CFG is constructed it means that something is
+ //! missing and the CFG is incomplete.
+ //!
+ //! \note This is only used to check if the number of created blocks matches
+ //! the number of added blocks.
+ inline bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); }
+
+ //! Gets the next timestamp to be used to mark CFG blocks.
+ inline uint64_t nextTimestamp() const noexcept { return ++_lastTimestamp; }
+
+ //! Creates a new `RABlock` instance.
+ //!
+ //! \note New blocks don't have ID assigned until they are added to the block
+ //! array by calling `addBlock()`.
+ RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept;
+
+ //! Tries to find a neighboring LabelNode (without going through code) that is
+ //! already connected with a `RABlock`. If no label is found then a new RABlock
+ //! is created and assigned to all possible labels in a backward direction.
+ RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept;
+
+ //! Adds the given `block` to the block list and assigns it a unique block id.
+ Error addBlock(RABlock* block) noexcept;
+
+ inline Error addExitBlock(RABlock* block) noexcept {
+ block->addFlags(RABlock::kFlagIsFuncExit);
+ return _exits.append(allocator(), block);
+ }
+
+ ASMJIT_INLINE RAInst* newRAInst(RABlock* block, uint32_t flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
+ void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount));
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) RAInst(block, flags, tiedRegCount, clobberedRegs);
+ }
+
+ ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
+ uint32_t tiedRegCount = ib.tiedRegCount();
+ RAInst* raInst = newRAInst(block, ib.aggregatedFlags(), tiedRegCount, ib._clobbered);
+
+ if (ASMJIT_UNLIKELY(!raInst))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ RARegIndex index;
+ uint32_t flagsFilter = ~ib.forbiddenFlags();
+
+ index.buildIndexes(ib._count);
+ raInst->_tiedIndex = index;
+ raInst->_tiedCount = ib._count;
+
+ for (uint32_t i = 0; i < tiedRegCount; i++) {
+ RATiedReg* tiedReg = ib[i];
+ RAWorkReg* workReg = workRegById(tiedReg->workId());
+
+ workReg->resetTiedReg();
+ uint32_t group = workReg->group();
+
+ if (tiedReg->hasUseId()) {
+ block->addFlags(RABlock::kFlagHasFixedRegs);
+ raInst->_usedRegs[group] |= Support::bitMask(tiedReg->useId());
+ }
+
+ if (tiedReg->hasOutId()) {
+ block->addFlags(RABlock::kFlagHasFixedRegs);
+ }
+
+ RATiedReg& dst = raInst->_tiedRegs[index[group]++];
+ dst = *tiedReg;
+ dst._flags &= flagsFilter;
+
+ if (!tiedReg->isDuplicate())
+ dst._allocableRegs &= ~ib._used[group];
+ }
+
+ node->setPassData<RAInst>(raInst);
+ return kErrorOk;
+ }
+
+ //! \}
+
+ //! \name CFG - Build CFG
+ //! \{
+
+ //! Traverses the whole function and does the following:
+ //!
+ //! 1. Construct CFG (represented by `RABlock`) by populating `_blocks` and
+ //! `_exits`. Blocks describe the control flow of the function and contain
+ //! some additional information that is used by the register allocator.
+ //!
+ //! 2. Remove unreachable code immediately. This is not strictly necessary
+ //! for BaseCompiler itself as the register allocator cannot reach such
+ //! nodes, but keeping instructions that use virtual registers would fail
+ //! during instruction encoding phase (Assembler).
+ //!
+ //! 3. `RAInst` is created for each `InstNode` or compatible. It contains
+ //! information that is essential for further analysis and register
+ //! allocation.
+ //!
+ //! Use the `RACFGBuilder` template, which provides the necessary boilerplate.
+ virtual Error buildCFG() noexcept = 0;
+
+ //! Called after the CFG is built.
+ Error initSharedAssignments(const ZoneVector<uint32_t>& sharedAssignmentsMap) noexcept;
+
+ //! \}
+
+ //! \name CFG - Views Order
+ //! \{
+
+ //! Constructs CFG views (only POV at the moment).
+ Error buildViews() noexcept;
+
+ //! \}
+
+ //! \name CFG - Dominators
+ //! \{
+
+ // Terminology:
+ // - A node `X` dominates a node `Z` if any path from the entry point to
+ // `Z` has to go through `X`.
+ // - A node `Z` post-dominates a node `X` if any path from `X` to the end
+ // of the graph has to go through `Z`.
+
+ //! Constructs a dominator-tree from CFG.
+ Error buildDominators() noexcept;
+
+ bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept;
+ const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept;
+
+ //! Tests whether the basic block `a` dominates `b` - non-strict, returns true when `a == b`.
+ inline bool dominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? true : _strictlyDominates(a, b); }
+ //! Tests whether the basic block `a` dominates `b` - strict dominance check, returns false when `a == b`.
+ inline bool strictlyDominates(const RABlock* a, const RABlock* b) const noexcept { return a == b ? false : _strictlyDominates(a, b); }
+
+ //! Returns a nearest common dominator of `a` and `b`.
+ inline RABlock* nearestCommonDominator(RABlock* a, RABlock* b) const noexcept { return const_cast<RABlock*>(_nearestCommonDominator(a, b)); }
+ //! Returns a nearest common dominator of `a` and `b` (const).
+ inline const RABlock* nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept { return _nearestCommonDominator(a, b); }
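+
+ // Worked example (illustrative): in a diamond CFG A -> {B, C} -> D, block A
+ // dominates all four blocks, `nearestCommonDominator(B, C)` is A,
+ // `dominates(A, D)` is true, and `strictlyDominates(D, D)` is false.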
+
+ //! \}
+
+ //! \name CFG - Utilities
+ //! \{
+
+ Error removeUnreachableBlocks() noexcept;
+
+ //! Returns `node` or some node after it that is ideal for beginning a new
+ //! block. This function is mostly used after a conditional or unconditional
+ //! jump to select the successor node. In some cases the next node could be
+ //! a label, which means it could already have a block assigned.
+ BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept;
+
+ //! Returns `true` if `node` can flow to `target` without crossing any other
+ //! code or data. It's used to eliminate jumps to labels that immediately
+ //! follow them.
+ bool isNextTo(BaseNode* node, BaseNode* target) noexcept;
+
+ //! \}
+
+ //! \name Virtual Register Management
+ //! \{
+
+ //! Returns the native size of the general-purpose register of the target architecture.
+ inline uint32_t gpSize() const noexcept { return _sp.size(); }
+ inline uint32_t availableRegCount(uint32_t group) const noexcept { return _availableRegCount[group]; }
+
+ inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; }
+
+ inline RAWorkRegs& workRegs() noexcept { return _workRegs; }
+ inline RAWorkRegs& workRegs(uint32_t group) noexcept { return _workRegsOfGroup[group]; }
+
+ inline const RAWorkRegs& workRegs() const noexcept { return _workRegs; }
+ inline const RAWorkRegs& workRegs(uint32_t group) const noexcept { return _workRegsOfGroup[group]; }
+
+ inline uint32_t workRegCount() const noexcept { return _workRegs.size(); }
+ inline uint32_t workRegCount(uint32_t group) const noexcept { return _workRegsOfGroup[group].size(); }
+
+ inline void _buildPhysIndex() noexcept {
+ _physRegIndex.buildIndexes(_physRegCount);
+ _physRegTotal = uint32_t(_physRegIndex[BaseReg::kGroupVirt - 1]) +
+ uint32_t(_physRegCount[BaseReg::kGroupVirt - 1]);
+ }
+ inline uint32_t physRegIndex(uint32_t group) const noexcept { return _physRegIndex[group]; }
+ inline uint32_t physRegTotal() const noexcept { return _physRegTotal; }
+
+ Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept;
+
+ //! Creates `RAWorkReg` data for the given `vReg`. The function does nothing
+ //! if `vReg` already contains a link to `RAWorkReg`. Called by `constructBlocks()`.
+ inline Error asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept {
+ *out = vReg->workReg();
+ return *out ? kErrorOk : _asWorkReg(vReg, out);
+ }
+
+ inline Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
+ const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs();
+ if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size()))
+ return DebugUtils::errored(kErrorInvalidVirtId);
+ return asWorkReg(virtRegs[vIndex], out);
+ }
+
+ inline RAStackSlot* getOrCreateStackSlot(RAWorkReg* workReg) noexcept {
+ RAStackSlot* slot = workReg->stackSlot();
+ if (slot) return slot;
+
+ slot = _stackAllocator.newSlot(_sp.id(), workReg->virtReg()->virtSize(), workReg->virtReg()->alignment(), 0);
+ workReg->_stackSlot = slot;
+ workReg->markStackUsed();
+ return slot;
+ }
+
+ inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept {
+ getOrCreateStackSlot(workReg);
+ return BaseMem(BaseMem::Decomposed { _sp.type(), workReg->virtId(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
+ }
+
+ WorkToPhysMap* newWorkToPhysMap() noexcept;
+ PhysToWorkMap* newPhysToWorkMap() noexcept;
+
+ inline PhysToWorkMap* clonePhysToWorkMap(const PhysToWorkMap* map) noexcept {
+ size_t size = PhysToWorkMap::sizeOf(_physRegTotal);
+ return static_cast<PhysToWorkMap*>(zone()->dupAligned(map, size, sizeof(uint32_t)));
+ }
+
+ inline WorkToPhysMap* cloneWorkToPhysMap(const WorkToPhysMap* map) noexcept {
+ size_t size = WorkToPhysMap::sizeOf(_workRegs.size());
+ if (ASMJIT_UNLIKELY(size == 0))
+ return const_cast<WorkToPhysMap*>(map);
+ return static_cast<WorkToPhysMap*>(zone()->dup(map, size));
+ }
+
+ //! \}
+
+ //! \name Liveness Analysis & Statistics
+ //! \{
+
+ //! 1. Calculates GEN/KILL/IN/OUT of each block.
+ //! 2. Calculates live spans and basic statistics of each work register.
+ Error buildLiveness() noexcept;
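+
+ // The classic backward dataflow equations this computes (illustrative):
+ //
+ //   liveOut(B) = union of liveIn(S) over all successors S of B
+ //   liveIn(B)  = gen(B) | (liveOut(B) & ~kill(B))
+ //
+ // iterated until a fixed point is reached.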
+
+ //! Assigns argIndex to WorkRegs. Must be called after the liveness analysis
+ //! finishes as it checks whether the argument is live upon entry.
+ Error assignArgIndexToWorkRegs() noexcept;
+
+ //! \}
+
+ //! \name Register Allocation - Global
+ //! \{
+
+ //! Runs a global register allocator.
+ Error runGlobalAllocator() noexcept;
+
+ //! Initializes data structures used for global live spans.
+ Error initGlobalLiveSpans() noexcept;
+
+ Error binPack(uint32_t group) noexcept;
+
+ //! \}
+
+ //! \name Register Allocation - Local
+ //! \{
+
+ //! Runs a local register allocator.
+ Error runLocalAllocator() noexcept;
+ Error setBlockEntryAssignment(RABlock* block, const RABlock* fromBlock, const RAAssignment& fromAssignment) noexcept;
+ Error setSharedAssignment(uint32_t sharedAssignmentId, const RAAssignment& fromAssignment) noexcept;
+
+ //! Called after an entry assignment has been assigned to a block.
+ //!
+ //! This cannot change the assignment, but can examine it.
+ Error blockEntryAssigned(const RAAssignment& as) noexcept;
+
+ //! \}
+
+ //! \name Register Allocation Utilities
+ //! \{
+
+ Error useTemporaryMem(BaseMem& out, uint32_t size, uint32_t alignment) noexcept;
+
+ //! \}
+
+ //! \name Function Prolog & Epilog
+ //! \{
+
+ Error updateStackFrame() noexcept;
+ Error _markStackArgsToKeep() noexcept;
+ Error _updateStackArgs() noexcept;
+ Error insertPrologEpilog() noexcept;
+
+ //! \}
+
+ //! \name Instruction Rewriter
+ //! \{
+
+ Error rewrite() noexcept;
+ Error _rewrite(BaseNode* first, BaseNode* stop) noexcept;
+
+ //! \}
+
+#ifndef ASMJIT_NO_LOGGING
+ //! \name Logging
+ //! \{
+
+ Error annotateCode() noexcept;
+
+ Error _dumpBlockIds(String& sb, const RABlocks& blocks) noexcept;
+ Error _dumpBlockLiveness(String& sb, const RABlock* block) noexcept;
+ Error _dumpLiveSpans(String& sb) noexcept;
+
+ //! \}
+#endif
+
+ //! \name Emit
+ //! \{
+
+ virtual Error onEmitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept = 0;
+ virtual Error onEmitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept = 0;
+
+ virtual Error onEmitLoad(uint32_t workId, uint32_t dstPhysId) noexcept = 0;
+ virtual Error onEmitSave(uint32_t workId, uint32_t srcPhysId) noexcept = 0;
+
+ virtual Error onEmitJump(const Label& label) noexcept = 0;
+ virtual Error onEmitPreCall(FuncCallNode* call) noexcept = 0;
+
+ //! \}
+};
+
+inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocator(); }
+
+inline uint32_t RABlock::entryScratchGpRegs() const noexcept {
+ uint32_t regs = _entryScratchGpRegs;
+ if (hasSharedAssignmentId())
+ regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs();
+ return regs;
+}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RAPASS_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/rastack.cpp b/3rdparty/asmjit/src/asmjit/core/rastack.cpp
new file mode 100644
index 00000000000..342b7ce7c9e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/rastack.cpp
@@ -0,0 +1,207 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/rastack_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::RAStackAllocator - Slots]
+// ============================================================================
+
+RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
+ if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
+ return nullptr;
+
+ RAStackSlot* slot = allocator()->allocT<RAStackSlot>();
+ if (ASMJIT_UNLIKELY(!slot))
+ return nullptr;
+
+ slot->_baseRegId = uint8_t(baseRegId);
+ slot->_alignment = uint8_t(Support::max<uint32_t>(alignment, 1));
+ slot->_reserved[0] = 0;
+ slot->_reserved[1] = 0;
+ slot->_useCount = 0;
+ slot->_size = size;
+ slot->_flags = flags;
+
+ slot->_weight = 0;
+ slot->_offset = 0;
+
+ _alignment = Support::max<uint32_t>(_alignment, alignment);
+ _slots.appendUnsafe(slot);
+ return slot;
+}
+
+// ============================================================================
+// [asmjit::RAStackAllocator - Utilities]
+// ============================================================================
+
+struct RAStackGap {
+ inline RAStackGap() noexcept
+ : offset(0),
+ size(0) {}
+
+ inline RAStackGap(uint32_t offset, uint32_t size) noexcept
+ : offset(offset),
+ size(size) {}
+
+ inline RAStackGap(const RAStackGap& other) noexcept
+ : offset(other.offset),
+ size(other.size) {}
+
+ uint32_t offset;
+ uint32_t size;
+};
+
+Error RAStackAllocator::calculateStackFrame() noexcept {
+ // Base weight added to all registers regardless of their size and alignment.
+ constexpr uint32_t kBaseRegWeight = 16;
+
+ // STEP 1:
+ //
+ // Update usage based on the size of the slot. We boost smaller slots so that
+ // a 32-bit register has a higher priority than a 128-bit one; however, if a
+ // 128-bit register is used four times more often than some 32-bit register,
+ // it will outweigh it.
+ for (RAStackSlot* slot : _slots) {
+ uint32_t alignment = slot->alignment();
+ ASMJIT_ASSERT(alignment > 0);
+
+ uint32_t power = Support::ctz(alignment);
+ uint64_t weight;
+
+ if (slot->isRegHome())
+ weight = kBaseRegWeight + (uint64_t(slot->useCount()) * (7 - power));
+ else
+ weight = power;
+
+ // If the weight overflowed (about as likely as winning a lottery), just
+ // clamp it to the maximum possible weight; at that point the exact value
+ // doesn't matter.
+ if (weight > 0xFFFFFFFFu)
+ weight = 0xFFFFFFFFu;
+
+ slot->setWeight(uint32_t(weight));
+ }
+
+ // STEP 2:
+ //
+ // Sort stack slots based on their newly calculated weight (in descending order).
+ _slots.sort([](const RAStackSlot* a, const RAStackSlot* b) noexcept {
+ return a->weight() > b->weight() ? 1 :
+ a->weight() == b->weight() ? 0 : -1;
+ });
+
+ // STEP 3:
+ //
+ // Calculate the offset of each slot. We start from the slot that has the
+ // highest weight and advance to slots with lower weight. It could look like
+ // offsets start from the first slot in our list and then simply increase,
+ // but that's not always the case as we also try to fill gaps introduced by
+ // the fact that slots are sorted by weight and not by size & alignment; when
+ // some slot needs alignment we distribute the resulting gap to `gaps`.
+ uint32_t offset = 0;
+ ZoneVector<RAStackGap> gaps[kSizeCount - 1];
+
+ for (RAStackSlot* slot : _slots) {
+ if (slot->isStackArg()) continue;
+
+ uint32_t slotAlignment = slot->alignment();
+ uint32_t alignedOffset = Support::alignUp(offset, slotAlignment);
+
+ // Try to find a slot within gaps first, before advancing the `offset`.
+ bool foundGap = false;
+ uint32_t gapSize = 0;
+ uint32_t gapOffset = 0;
+
+ {
+ uint32_t slotSize = slot->size();
+ if (slotSize < (1u << uint32_t(ASMJIT_ARRAY_SIZE(gaps)))) {
+ // Iterate from the lowest to the highest possible.
+ uint32_t index = Support::ctz(slotSize);
+ do {
+ if (!gaps[index].empty()) {
+ RAStackGap gap = gaps[index].pop();
+
+ ASMJIT_ASSERT(Support::isAligned(gap.offset, slotAlignment));
+ slot->setOffset(int32_t(gap.offset));
+
+ // The remainder of the gap follows the slot we just placed into it.
+ gapSize = gap.size - slotSize;
+ gapOffset = gap.offset + slotSize;
+
+ foundGap = true;
+ break;
+ }
+ } while (++index < uint32_t(ASMJIT_ARRAY_SIZE(gaps)));
+ }
+ }
+
+ // No gap found, we may create a new one(s) if the current offset is not aligned.
+ if (!foundGap && offset != alignedOffset) {
+ // The padding between the current offset and the aligned offset becomes the gap.
+ gapSize = alignedOffset - offset;
+ gapOffset = offset;
+
+ offset = alignedOffset;
+ }
+
+ // True if we have found a gap and not filled all of it or we aligned the current offset.
+ if (gapSize) {
+ uint32_t gapEnd = gapSize + gapOffset;
+ while (gapOffset < gapEnd) {
+ uint32_t index = Support::ctz(gapOffset);
+ uint32_t slotSize = 1u << index;
+
+ // The remaining space is smaller than the power-of-two chunk we would record; bail out.
+ if (gapEnd - gapOffset < slotSize)
+ break;
+
+ ASMJIT_PROPAGATE(gaps[index].append(allocator(), RAStackGap(gapOffset, slotSize)));
+ gapOffset += slotSize;
+ }
+ }
+
+ if (!foundGap) {
+ ASMJIT_ASSERT(Support::isAligned(offset, slotAlignment));
+ slot->setOffset(int32_t(offset));
+ offset += slot->size();
+ }
+ }
+
+ _stackSize = Support::alignUp(offset, _alignment);
+ return kErrorOk;
+}
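+
+// Worked example of the gap bookkeeping above (illustrative): a 4-byte slot
+// placed at offset 0 followed by a 16-byte slot that requires 16-byte
+// alignment turns the padding [4, 16) into power-of-two gaps (4 bytes at
+// offset 4, 8 bytes at offset 8); a later 8-byte slot can then reuse the gap
+// at offset 8 instead of growing the frame.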
+
+Error RAStackAllocator::adjustSlotOffsets(int32_t offset) noexcept {
+ for (RAStackSlot* slot : _slots)
+ if (!slot->isStackArg())
+ slot->_offset += offset;
+ return kErrorOk;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/core/rastack_p.h b/3rdparty/asmjit/src/asmjit/core/rastack_p.h
new file mode 100644
index 00000000000..d45f7aa8896
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/rastack_p.h
@@ -0,0 +1,183 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_RASTACK_P_H_INCLUDED
+#define ASMJIT_CORE_RASTACK_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/radefs_p.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_ra
+//! \{
+
+// ============================================================================
+// [asmjit::RAStackSlot]
+// ============================================================================
+
+//! Stack slot.
+struct RAStackSlot {
+ enum Flags : uint32_t {
+ // TODO: kFlagRegHome is apparently not used, but isRegHome() is.
+ kFlagRegHome = 0x00000001u, //!< Stack slot is register home slot.
+ kFlagStackArg = 0x00000002u //!< Stack slot position matches argument passed via stack.
+ };
+
+ enum ArgIndex : uint32_t {
+ kNoArgIndex = 0xFF
+ };
+
+ //! Base register used to address the stack.
+ uint8_t _baseRegId;
+ //! Minimum alignment required by the slot.
+ uint8_t _alignment;
+ //! Reserved for future use.
+ uint8_t _reserved[2];
+ //! Size of memory required by the slot.
+ uint32_t _size;
+ //! Slot flags.
+ uint32_t _flags;
+
+ //! Usage counter (one unit equals one memory access).
+ uint32_t _useCount;
+ //! Weight of the slot (calculated by `calculateStackFrame()`).
+ uint32_t _weight;
+ //! Stack offset (calculated by `calculateStackFrame()`).
+ int32_t _offset;
+
+ //! \name Accessors
+ //! \{
+
+ inline uint32_t baseRegId() const noexcept { return _baseRegId; }
+ inline void setBaseRegId(uint32_t id) noexcept { _baseRegId = uint8_t(id); }
+
+ inline uint32_t size() const noexcept { return _size; }
+ inline uint32_t alignment() const noexcept { return _alignment; }
+
+ inline uint32_t flags() const noexcept { return _flags; }
+ inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
+ inline bool isRegHome() const noexcept { return (_flags & kFlagRegHome) != 0; }
+ inline bool isStackArg() const noexcept { return (_flags & kFlagStackArg) != 0; }
+
+ inline uint32_t useCount() const noexcept { return _useCount; }
+ inline void addUseCount(uint32_t n = 1) noexcept { _useCount += n; }
+
+ inline uint32_t weight() const noexcept { return _weight; }
+ inline void setWeight(uint32_t weight) noexcept { _weight = weight; }
+
+ inline int32_t offset() const noexcept { return _offset; }
+ inline void setOffset(int32_t offset) noexcept { _offset = offset; }
+
+ //! \}
+};
+
+typedef ZoneVector<RAStackSlot*> RAStackSlots;
+
+// ============================================================================
+// [asmjit::RAStackAllocator]
+// ============================================================================
+
+//! Stack allocator.
+class RAStackAllocator {
+public:
+ ASMJIT_NONCOPYABLE(RAStackAllocator)
+
+ enum Size : uint32_t {
+ kSize1 = 0,
+ kSize2 = 1,
+ kSize4 = 2,
+ kSize8 = 3,
+ kSize16 = 4,
+ kSize32 = 5,
+ kSize64 = 6,
+ kSizeCount = 7
+ };
+
+ //! Allocator used to allocate internal data.
+ ZoneAllocator* _allocator;
+ //! Count of bytes used by all slots.
+ uint32_t _bytesUsed;
+ //! Calculated stack size (can be a bit greater than `_bytesUsed`).
+ uint32_t _stackSize;
+ //! Minimum stack alignment.
+ uint32_t _alignment;
+ //! Stack slots vector.
+ RAStackSlots _slots;
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline RAStackAllocator() noexcept
+ : _allocator(nullptr),
+ _bytesUsed(0),
+ _stackSize(0),
+ _alignment(1),
+ _slots() {}
+
+ inline void reset(ZoneAllocator* allocator) noexcept {
+ _allocator = allocator;
+ _bytesUsed = 0;
+ _stackSize = 0;
+ _alignment = 1;
+ _slots.reset();
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline ZoneAllocator* allocator() const noexcept { return _allocator; }
+
+ inline uint32_t bytesUsed() const noexcept { return _bytesUsed; }
+ inline uint32_t stackSize() const noexcept { return _stackSize; }
+ inline uint32_t alignment() const noexcept { return _alignment; }
+
+ inline RAStackSlots& slots() noexcept { return _slots; }
+ inline const RAStackSlots& slots() const noexcept { return _slots; }
+ inline uint32_t slotCount() const noexcept { return _slots.size(); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ RAStackSlot* newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags = 0) noexcept;
+
+ Error calculateStackFrame() noexcept;
+ Error adjustSlotOffsets(int32_t offset) noexcept;
+
+ //! \}
+};
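+
+// Illustrative usage sketch (names assumed): the register allocator creates a
+// slot per spilled virtual register and resolves offsets at the end:
+//
+//   RAStackAllocator stack;
+//   stack.reset(allocator);
+//   RAStackSlot* slot = stack.newSlot(spId, 8, 8, RAStackSlot::kFlagRegHome);
+//   slot->addUseCount();                 // Bump weight on every access.
+//   ASMJIT_PROPAGATE(stack.calculateStackFrame());
+//   int32_t off = slot->offset();        // Valid after the calculation.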
+
+//! \}
+//! \endcond
+
+ASMJIT_END_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_CORE_RASTACK_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/string.cpp b/3rdparty/asmjit/src/asmjit/core/string.cpp
new file mode 100644
index 00000000000..564a566f042
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/string.cpp
@@ -0,0 +1,545 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/string.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::String - Globals]
+// ============================================================================
+
+static const char String_baseN[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+
+constexpr size_t kMinAllocSize = 64;
+constexpr size_t kMaxAllocSize = std::numeric_limits<size_t>::max() - Globals::kGrowThreshold;
+
+// ============================================================================
+// [asmjit::String]
+// ============================================================================
+
+Error String::reset() noexcept {
+ if (_type == kTypeLarge)
+ ::free(_large.data);
+
+ _resetInternal();
+ return kErrorOk;
+}
+
+Error String::clear() noexcept {
+ if (isLarge()) {
+ _large.size = 0;
+ _large.data[0] = '\0';
+ }
+ else {
+ _raw.uptr[0] = 0;
+ }
+
+ return kErrorOk;
+}
+
+char* String::prepare(uint32_t op, size_t size) noexcept {
+ char* curData;
+ size_t curSize;
+ size_t curCapacity;
+
+ if (isLarge()) {
+ curData = this->_large.data;
+ curSize = this->_large.size;
+ curCapacity = this->_large.capacity;
+ }
+ else {
+ curData = this->_small.data;
+ curSize = this->_small.type; // In SSO mode the `type` byte stores the size.
+ curCapacity = kSSOCapacity;
+ }
+
+ if (op == kOpAssign) {
+ if (size > curCapacity) {
+ // Prevent arithmetic overflow.
+ if (ASMJIT_UNLIKELY(size >= kMaxAllocSize))
+ return nullptr;
+
+ size_t newCapacity = Support::alignUp<size_t>(size + 1, kMinAllocSize);
+ char* newData = static_cast<char*>(::malloc(newCapacity));
+
+ if (ASMJIT_UNLIKELY(!newData))
+ return nullptr;
+
+ if (_type == kTypeLarge)
+ ::free(curData);
+
+ _large.type = kTypeLarge;
+ _large.size = size;
+ _large.capacity = newCapacity - 1;
+ _large.data = newData;
+
+ newData[size] = '\0';
+ return newData;
+ }
+ else {
+ _setSize(size);
+ curData[size] = '\0';
+ return curData;
+ }
+ }
+ else {
+ // Prevent arithmetic overflow.
+ if (ASMJIT_UNLIKELY(size >= kMaxAllocSize - curSize))
+ return nullptr;
+
+ size_t newSize = size + curSize;
+ size_t newSizePlusOne = newSize + 1;
+
+ if (newSizePlusOne > curCapacity) {
+ size_t newCapacity = Support::max<size_t>(curCapacity + 1, kMinAllocSize);
+
+ if (newCapacity < newSizePlusOne && newCapacity < Globals::kGrowThreshold)
+ newCapacity = Support::alignUpPowerOf2(newCapacity);
+
+ if (newCapacity < newSizePlusOne)
+ newCapacity = Support::alignUp(newSizePlusOne, Globals::kGrowThreshold);
+
+ if (ASMJIT_UNLIKELY(newCapacity < newSizePlusOne))
+ return nullptr;
+
+ char* newData = static_cast<char*>(::malloc(newCapacity));
+ if (ASMJIT_UNLIKELY(!newData))
+ return nullptr;
+
+ memcpy(newData, curData, curSize);
+
+ if (_type == kTypeLarge)
+ ::free(curData);
+
+ _large.type = kTypeLarge;
+ _large.size = newSize;
+ _large.capacity = newCapacity - 1;
+ _large.data = newData;
+
+ newData[newSize] = '\0';
+ return newData + curSize;
+ }
+ else {
+ _setSize(newSize);
+ curData[newSize] = '\0';
+ return curData + curSize;
+ }
+ }
+}
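+
+// Illustrative usage: `prepare()` returns a pointer to exactly `size` writable
+// characters (assigning or appending depending on `op`) and takes care of the
+// null terminator, so callers simply fill the returned buffer:
+//
+//   String s;
+//   char* p = s.prepare(String::kOpAssign, 5);   // Room for exactly 5 chars.
+//   if (!p) return DebugUtils::errored(kErrorOutOfMemory);
+//   memcpy(p, "hello", 5);                       // Terminator already set.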
+
+Error String::assignString(const char* data, size_t size) noexcept {
+ char* dst = nullptr;
+
+ // Null-terminated string without `size` specified.
+ if (size == SIZE_MAX)
+ size = data ? strlen(data) : size_t(0);
+
+ if (isLarge()) {
+ if (size <= _large.capacity) {
+ dst = _large.data;
+ _large.size = size;
+ }
+ else {
+ size_t capacityPlusOne = Support::alignUp(size + 1, 32);
+ if (ASMJIT_UNLIKELY(capacityPlusOne < size))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ dst = static_cast<char*>(::malloc(capacityPlusOne));
+ if (ASMJIT_UNLIKELY(!dst))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ if (!isExternal())
+ ::free(_large.data);
+
+ _large.type = kTypeLarge;
+ _large.data = dst;
+ _large.size = size;
+ _large.capacity = capacityPlusOne - 1;
+ }
+ }
+ else {
+ if (size <= kSSOCapacity) {
+ ASMJIT_ASSERT(size < 0xFFu);
+
+ dst = _small.data;
+ _small.type = uint8_t(size);
+ }
+ else {
+ dst = static_cast<char*>(::malloc(size + 1));
+ if (ASMJIT_UNLIKELY(!dst))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ _large.type = kTypeLarge;
+ _large.data = dst;
+ _large.size = size;
+ _large.capacity = size;
+ }
+ }
+
+ // Optionally copy data from `data` and null-terminate.
+ if (data && size) {
+ // NOTE: We use `memmove()` instead of `memcpy()` so that assigning a
+ // substring of the same string works as expected.
+ ::memmove(dst, data, size);
+ }
+
+ dst[size] = '\0';
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::String - Operations]
+// ============================================================================
+
+Error String::_opString(uint32_t op, const char* str, size_t size) noexcept {
+ if (size == SIZE_MAX)
+ size = str ? strlen(str) : size_t(0);
+
+ if (!size)
+ return kErrorOk;
+
+ char* p = prepare(op, size);
+ if (!p) return DebugUtils::errored(kErrorOutOfMemory);
+
+ memcpy(p, str, size);
+ return kErrorOk;
+}
+
+Error String::_opChar(uint32_t op, char c) noexcept {
+ char* p = prepare(op, 1);
+ if (!p) return DebugUtils::errored(kErrorOutOfMemory);
+
+ *p = c;
+ return kErrorOk;
+}
+
+Error String::_opChars(uint32_t op, char c, size_t n) noexcept {
+ if (!n)
+ return kErrorOk;
+
+ char* p = prepare(op, n);
+ if (!p) return DebugUtils::errored(kErrorOutOfMemory);
+
+ memset(p, c, n);
+ return kErrorOk;
+}
+
+Error String::padEnd(size_t n, char c) noexcept {
+ size_t size = this->size();
+ return n > size ? appendChars(c, n - size) : kErrorOk;
+}
+
+Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
+ if (base < 2 || base > 36)
+ base = 10;
+
+ char buf[128];
+ char* p = buf + ASMJIT_ARRAY_SIZE(buf);
+
+ uint64_t orig = i;
+ char sign = '\0';
+
+ // --------------------------------------------------------------------------
+ // [Sign]
+ // --------------------------------------------------------------------------
+
+ if ((flags & kFormatSigned) != 0 && int64_t(i) < 0) {
+ i = uint64_t(-int64_t(i));
+ sign = '-';
+ }
+ else if ((flags & kFormatShowSign) != 0) {
+ sign = '+';
+ }
+ else if ((flags & kFormatShowSpace) != 0) {
+ sign = ' ';
+ }
+
+ // --------------------------------------------------------------------------
+ // [Number]
+ // --------------------------------------------------------------------------
+
+ do {
+ uint64_t d = i / base;
+ uint64_t r = i % base;
+
+ *--p = String_baseN[r];
+ i = d;
+ } while (i);
+
+ size_t numberSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);
+
+ // --------------------------------------------------------------------------
+ // [Alternate Form]
+ // --------------------------------------------------------------------------
+
+ if ((flags & kFormatAlternate) != 0) {
+ if (base == 8) {
+ if (orig != 0)
+ *--p = '0';
+ }
+ if (base == 16) {
+ *--p = 'x';
+ *--p = '0';
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [Width]
+ // --------------------------------------------------------------------------
+
+ if (sign != 0)
+ *--p = sign;
+
+ if (width > 256)
+ width = 256;
+
+ if (width <= numberSize)
+ width = 0;
+ else
+ width -= numberSize;
+
+ // --------------------------------------------------------------------------
+  // [Write]
+ // --------------------------------------------------------------------------
+
+ size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
+ char* data = prepare(op, prefixSize + width + numberSize);
+
+ if (!data)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ memcpy(data, p, prefixSize);
+ data += prefixSize;
+
+ memset(data, '0', width);
+ data += width;
+
+ memcpy(data, p + prefixSize, numberSize);
+ return kErrorOk;
+}
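+
+// A worked example of the formatting above (hypothetical call):
+//
+//   _opNumber(kOpAssign, 42, 10, 5, 0)
+//
+// produces "00042" - `numberSize` is 2 and there is no sign or prefix, so the
+// remaining width of 3 is filled with '0' in front of the digits.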
+
+Error String::_opHex(uint32_t op, const void* data, size_t size, char separator) noexcept {
+ char* dst;
+ const uint8_t* src = static_cast<const uint8_t*>(data);
+
+ if (!size)
+ return kErrorOk;
+
+ if (separator) {
+ if (ASMJIT_UNLIKELY(size >= std::numeric_limits<size_t>::max() / 3))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ dst = prepare(op, size * 3 - 1);
+ if (ASMJIT_UNLIKELY(!dst))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ size_t i = 0;
+ for (;;) {
+ dst[0] = String_baseN[(src[0] >> 4) & 0xF];
+ dst[1] = String_baseN[(src[0] ) & 0xF];
+ if (++i == size)
+ break;
+      // Insert the separator only between two bytes, never after the last one.
+ dst[2] = separator;
+ dst += 3;
+ src++;
+ }
+ }
+ else {
+ if (ASMJIT_UNLIKELY(size >= std::numeric_limits<size_t>::max() / 2))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ dst = prepare(op, size * 2);
+ if (ASMJIT_UNLIKELY(!dst))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ for (size_t i = 0; i < size; i++, dst += 2, src++) {
+ dst[0] = String_baseN[(src[0] >> 4) & 0xF];
+ dst[1] = String_baseN[(src[0] ) & 0xF];
+ }
+ }
+
+ return kErrorOk;
+}
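+
+// A usage sketch (digit case follows `String_baseN`, defined earlier in this
+// file):
+//
+//   const uint8_t bytes[] = { 0x01, 0x2A, 0xFF };
+//   String s;
+//   s.assignHex(bytes, 3, ':'); // 8 characters: two digits per byte plus
+//                               // a ':' between consecutive bytes.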
+
+Error String::_opFormat(uint32_t op, const char* fmt, ...) noexcept {
+ Error err;
+ va_list ap;
+
+ va_start(ap, fmt);
+ err = _opVFormat(op, fmt, ap);
+ va_end(ap);
+
+ return err;
+}
+
+Error String::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
+  size_t startAt = (op == kOpAssign) ? size_t(0) : size();
+  size_t remainingCapacity = capacity() - startAt;
+
+  char buf[1024];
+  int fmtResult;
+  size_t outputSize;
+
+  // A `va_list` can only be consumed once, so keep a copy in case the first
+  // `vsnprintf()` attempt doesn't fit and the operation has to be retried.
+  va_list apCopy;
+  va_copy(apCopy, ap);
+
+  if (remainingCapacity >= 128) {
+    // The buffer always provides `capacity() + 1` bytes, so the extra byte
+    // can hold the terminator even when the output fills the whole capacity.
+    fmtResult = vsnprintf(data() + startAt, remainingCapacity + 1, fmt, ap);
+    outputSize = size_t(fmtResult);
+
+    if (ASMJIT_LIKELY(outputSize <= remainingCapacity)) {
+      va_end(apCopy);
+      _setSize(startAt + outputSize);
+      return kErrorOk;
+    }
+  }
+  else {
+    fmtResult = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
+    outputSize = size_t(fmtResult);
+
+    if (ASMJIT_LIKELY(outputSize < ASMJIT_ARRAY_SIZE(buf))) {
+      va_end(apCopy);
+      return _opString(op, buf, outputSize);
+    }
+  }
+
+  if (ASMJIT_UNLIKELY(fmtResult < 0)) {
+    va_end(apCopy);
+    return DebugUtils::errored(kErrorInvalidState);
+  }
+
+  char* p = prepare(op, outputSize);
+  if (ASMJIT_UNLIKELY(!p)) {
+    va_end(apCopy);
+    return DebugUtils::errored(kErrorOutOfMemory);
+  }
+
+  // Retry with the copied `va_list` - the original was consumed above.
+  fmtResult = vsnprintf(p, outputSize + 1, fmt, apCopy);
+  va_end(apCopy);
+  ASMJIT_ASSERT(size_t(fmtResult) == outputSize);
+
+  return kErrorOk;
+}
+
+Error String::truncate(size_t newSize) noexcept {
+ if (isLarge()) {
+ if (newSize < _large.size) {
+ _large.data[newSize] = '\0';
+ _large.size = newSize;
+ }
+ }
+ else {
+ if (newSize < _type) {
+ _small.data[newSize] = '\0';
+ _small.type = uint8_t(newSize);
+ }
+ }
+
+ return kErrorOk;
+}
+
+bool String::eq(const char* other, size_t size) const noexcept {
+ const char* aData = data();
+ const char* bData = other;
+
+ size_t aSize = this->size();
+ size_t bSize = size;
+
+ if (bSize == SIZE_MAX) {
+ size_t i;
+ for (i = 0; i < aSize; i++)
+ if (aData[i] != bData[i] || bData[i] == 0)
+ return false;
+ return bData[i] == 0;
+ }
+ else {
+ if (aSize != bSize)
+ return false;
+ return ::memcmp(aData, bData, aSize) == 0;
+ }
+}
+
+// ============================================================================
+// [asmjit::String - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(core_string) {
+ String s;
+
+ EXPECT(s.isLarge() == false);
+ EXPECT(s.isExternal() == false);
+
+ EXPECT(s.assignChar('a') == kErrorOk);
+ EXPECT(s.size() == 1);
+ EXPECT(s.capacity() == String::kSSOCapacity);
+ EXPECT(s.data()[0] == 'a');
+ EXPECT(s.data()[1] == '\0');
+ EXPECT(s.eq("a") == true);
+ EXPECT(s.eq("a", 1) == true);
+
+ EXPECT(s.assignChars('b', 4) == kErrorOk);
+ EXPECT(s.size() == 4);
+ EXPECT(s.capacity() == String::kSSOCapacity);
+ EXPECT(s.data()[0] == 'b');
+ EXPECT(s.data()[1] == 'b');
+ EXPECT(s.data()[2] == 'b');
+ EXPECT(s.data()[3] == 'b');
+ EXPECT(s.data()[4] == '\0');
+ EXPECT(s.eq("bbbb") == true);
+ EXPECT(s.eq("bbbb", 4) == true);
+
+ EXPECT(s.assignString("abc") == kErrorOk);
+ EXPECT(s.size() == 3);
+ EXPECT(s.capacity() == String::kSSOCapacity);
+ EXPECT(s.data()[0] == 'a');
+ EXPECT(s.data()[1] == 'b');
+ EXPECT(s.data()[2] == 'c');
+ EXPECT(s.data()[3] == '\0');
+ EXPECT(s.eq("abc") == true);
+ EXPECT(s.eq("abc", 3) == true);
+
+ const char* large = "Large string that will not fit into SSO buffer";
+ EXPECT(s.assignString(large) == kErrorOk);
+ EXPECT(s.isLarge() == true);
+ EXPECT(s.size() == strlen(large));
+ EXPECT(s.capacity() > String::kSSOCapacity);
+ EXPECT(s.eq(large) == true);
+ EXPECT(s.eq(large, strlen(large)) == true);
+
+ const char* additional = " (additional content)";
+ EXPECT(s.isLarge() == true);
+ EXPECT(s.appendString(additional) == kErrorOk);
+ EXPECT(s.size() == strlen(large) + strlen(additional));
+
+ EXPECT(s.clear() == kErrorOk);
+ EXPECT(s.size() == 0);
+ EXPECT(s.empty() == true);
+ EXPECT(s.data()[0] == '\0');
+ EXPECT(s.isLarge() == true); // Clear should never release the memory.
+
+ EXPECT(s.appendUInt(1234) == kErrorOk);
+ EXPECT(s.eq("1234") == true);
+
+ StringTmp<64> sTmp;
+ EXPECT(sTmp.isLarge());
+ EXPECT(sTmp.isExternal());
+ EXPECT(sTmp.appendChars(' ', 1000) == kErrorOk);
+ EXPECT(!sTmp.isExternal());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/string.h b/3rdparty/asmjit/src/asmjit/core/string.h
new file mode 100644
index 00000000000..22108f21f8b
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/string.h
@@ -0,0 +1,352 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_STRING_H_INCLUDED
+#define ASMJIT_CORE_STRING_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+// ============================================================================
+// [asmjit::String]
+// ============================================================================
+
+//! A simple non-reference counted string that uses small string optimization (SSO).
+//!
+//! This string has 3 allocation possibilities:
+//!
+//! 1. Small - embedded buffer is used for up to `kSSOCapacity` characters.
+//! This should handle most small strings and thus avoid dynamic
+//! memory allocation for most use-cases.
+//!
+//! 2. Large - string that doesn't fit into an embedded buffer (or string
+//!            that was truncated from a larger buffer) and is owned by
+//!            AsmJit. When you destroy the string, AsmJit automatically
+//!            releases the large buffer.
+//!
+//! 3. External - like Large (2), however, the large buffer is not owned by
+//! AsmJit and won't be released when the string is destroyed
+//! or reallocated. This is mostly useful for working with
+//! larger temporary strings allocated on stack or with immutable
+//! strings.
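+//!
+//! A minimal usage sketch (illustrative only, using the API declared below):
+//!
+//!   String s;                   // Starts small (SSO) - no allocation.
+//!   s.assignString("abc");      // Still small - fits `kSSOCapacity`.
+//!   s.appendChars('x', 100);    // Outgrows SSO and becomes large (heap).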
+class String {
+public:
+ ASMJIT_NONCOPYABLE(String)
+
+ //! String operation.
+ enum Op : uint32_t {
+ kOpAssign = 0,
+ kOpAppend = 1
+ };
+
+ //! String format flags.
+ enum FormatFlags : uint32_t {
+ kFormatShowSign = 0x00000001u,
+ kFormatShowSpace = 0x00000002u,
+ kFormatAlternate = 0x00000004u,
+ kFormatSigned = 0x80000000u
+ };
+
+ //! \cond INTERNAL
+ enum : uint32_t {
+ kLayoutSize = 32,
+ kSSOCapacity = kLayoutSize - 2
+ };
+
+ //! String type.
+ enum Type : uint8_t {
+ kTypeLarge = 0x1Fu, //!< Large string (owned by String).
+ kTypeExternal = 0x20u //!< External string (zone allocated or not owned by String).
+ };
+
+ union Raw {
+ uint8_t u8[kLayoutSize];
+ uint64_t u64[kLayoutSize / sizeof(uint64_t)];
+ uintptr_t uptr[kLayoutSize / sizeof(uintptr_t)];
+ };
+
+ struct Small {
+ uint8_t type;
+ char data[kSSOCapacity + 1u];
+ };
+
+ struct Large {
+ uint8_t type;
+ uint8_t reserved[sizeof(uintptr_t) - 1];
+ size_t size;
+ size_t capacity;
+ char* data;
+ };
+
+ union {
+ uint8_t _type;
+ Raw _raw;
+ Small _small;
+ Large _large;
+ };
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline String() noexcept
+ : _small {} {}
+
+ inline String(String&& other) noexcept {
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
+ _raw.uptr[i] = other._raw.uptr[i];
+ other._resetInternal();
+ }
+
+ inline ~String() noexcept {
+ reset();
+ }
+
+ //! Reset the string into a construction state.
+ ASMJIT_API Error reset() noexcept;
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline bool operator==(const char* other) const noexcept { return eq(other); }
+ inline bool operator!=(const char* other) const noexcept { return !eq(other); }
+
+ inline bool operator==(const String& other) const noexcept { return eq(other); }
+ inline bool operator!=(const String& other) const noexcept { return !eq(other); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool isLarge() const noexcept { return _type >= kTypeLarge; }
+ inline bool isExternal() const noexcept { return _type == kTypeExternal; }
+
+ inline bool empty() const noexcept { return size() == 0; }
+ inline size_t size() const noexcept { return isLarge() ? size_t(_large.size) : size_t(_type); }
+ inline size_t capacity() const noexcept { return isLarge() ? _large.capacity : size_t(kSSOCapacity); }
+
+ inline char* data() noexcept { return isLarge() ? _large.data : _small.data; }
+ inline const char* data() const noexcept { return isLarge() ? _large.data : _small.data; }
+
+ inline char* end() noexcept { return data() + size(); }
+ inline const char* end() const noexcept { return data() + size(); }
+
+ //! \}
+
+ //! \name String Operations
+ //! \{
+
+ //! Clear the content of the string.
+ ASMJIT_API Error clear() noexcept;
+
+ ASMJIT_API char* prepare(uint32_t op, size_t size) noexcept;
+
+ ASMJIT_API Error _opString(uint32_t op, const char* str, size_t size = SIZE_MAX) noexcept;
+ ASMJIT_API Error _opFormat(uint32_t op, const char* fmt, ...) noexcept;
+ ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
+ ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
+ ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
+ ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
+ ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t size, char separator = '\0') noexcept;
+
+  //! Replace the string content with a string specified by `data` and `size`. If
+ //! `size` is `SIZE_MAX` then it's considered null-terminated and its length
+ //! will be obtained through `strlen()`.
+ ASMJIT_API Error assignString(const char* data, size_t size = SIZE_MAX) noexcept;
+
+ //! Replace the current content by a formatted string `fmt`.
+ template<typename... Args>
+ inline Error assignFormat(const char* fmt, Args&&... args) noexcept {
+ return _opFormat(kOpAssign, fmt, std::forward<Args>(args)...);
+ }
+
+ //! Replace the current content by a formatted string `fmt` (va_list version).
+ inline Error assignVFormat(const char* fmt, va_list ap) noexcept {
+ return _opVFormat(kOpAssign, fmt, ap);
+ }
+
+ //! Replace the current content by a single `c` character.
+ inline Error assignChar(char c) noexcept {
+ return _opChar(kOpAssign, c);
+ }
+
+ //! Replace the current content by `c` character `n` times.
+ inline Error assignChars(char c, size_t n) noexcept {
+ return _opChars(kOpAssign, c, n);
+ }
+
+ //! Replace the current content by a formatted integer `i` (signed).
+ inline Error assignInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAssign, uint64_t(i), base, width, flags | kFormatSigned);
+ }
+
+ //! Replace the current content by a formatted integer `i` (unsigned).
+ inline Error assignUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAssign, i, base, width, flags);
+ }
+
+ //! Replace the current content by the given `data` converted to a HEX string.
+ inline Error assignHex(const void* data, size_t size, char separator = '\0') noexcept {
+ return _opHex(kOpAssign, data, size, separator);
+ }
+
+ //! Append string `str` of size `size` (or possibly null terminated).
+ inline Error appendString(const char* str, size_t size = SIZE_MAX) noexcept {
+ return _opString(kOpAppend, str, size);
+ }
+
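+  //! Append a formatted string `fmt`.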
+ template<typename... Args>
+ inline Error appendFormat(const char* fmt, Args&&... args) noexcept {
+ return _opFormat(kOpAppend, fmt, std::forward<Args>(args)...);
+ }
+
+ //! Append a formatted string `fmt` (va_list version).
+ inline Error appendVFormat(const char* fmt, va_list ap) noexcept {
+ return _opVFormat(kOpAppend, fmt, ap);
+ }
+
+ //! Append a single `c` character.
+ inline Error appendChar(char c) noexcept {
+ return _opChar(kOpAppend, c);
+ }
+
+ //! Append `c` character `n` times.
+ inline Error appendChars(char c, size_t n) noexcept {
+ return _opChars(kOpAppend, c, n);
+ }
+
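+  //! Pad the string with `c` so that its size becomes at least `n`.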
+ ASMJIT_API Error padEnd(size_t n, char c = ' ') noexcept;
+
+  //! Append a formatted integer `i` (signed).
+ inline Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAppend, uint64_t(i), base, width, flags | kFormatSigned);
+ }
+
+  //! Append a formatted integer `i` (unsigned).
+ inline Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
+ return _opNumber(kOpAppend, i, base, width, flags);
+ }
+
+ //! Append the given `data` converted to a HEX string.
+ inline Error appendHex(const void* data, size_t size, char separator = '\0') noexcept {
+ return _opHex(kOpAppend, data, size, separator);
+ }
+
+ //! Truncate the string length into `newSize`.
+ ASMJIT_API Error truncate(size_t newSize) noexcept;
+
+ ASMJIT_API bool eq(const char* other, size_t size = SIZE_MAX) const noexcept;
+ inline bool eq(const String& other) const noexcept { return eq(other.data(), other.size()); }
+
+ //! \}
+
+ //! \name Internal Functions
+ //! \{
+
+  //! Resets the string to the embedded (SSO) representation and makes it
+  //! empty (zero length, zero first char).
+  //!
+  //! \note This is only called internally, always after an external buffer
+  //! has been released, as it zeroes all bytes used by String's embedded
+  //! storage.
+ inline void _resetInternal() noexcept {
+ for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
+ _raw.uptr[i] = 0;
+ }
+
+ inline void _setSize(size_t newSize) noexcept {
+ if (isLarge())
+ _large.size = newSize;
+ else
+ _small.type = uint8_t(newSize);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::StringTmp]
+// ============================================================================
+
+//! Temporary string builder with `N` bytes of statically allocated (embedded) storage.
+template<size_t N>
+class StringTmp : public String {
+public:
+ ASMJIT_NONCOPYABLE(StringTmp<N>)
+
+ //! Embedded data.
+ char _embeddedData[Support::alignUp(N + 1, sizeof(size_t))];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline StringTmp() noexcept {
+ _resetToTemporary();
+ }
+
+ inline void _resetToTemporary() noexcept {
+ _large.type = kTypeExternal;
+ _large.capacity = ASMJIT_ARRAY_SIZE(_embeddedData) - 1;
+ _large.data = _embeddedData;
+ _embeddedData[0] = '\0';
+ }
+
+ //! \}
+};
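+
+// A usage sketch (illustrative only) - StringTmp formats into its embedded
+// buffer and only falls back to the heap when the content outgrows `N`:
+//
+//   StringTmp<256> tmp;
+//   tmp.appendFormat("mov %s, %s", "rax", "rbx"); // Embedded, no allocation.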
+
+// ============================================================================
+// [asmjit::FixedString]
+// ============================================================================
+
+//! A fixed string - only useful for strings that would never exceed `N - 1`
+//! characters; always null-terminated.
+template<size_t N>
+union FixedString {
+ enum : uint32_t {
+ kNumU32 = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
+ };
+
+ char str[kNumU32 * sizeof(uint32_t)];
+ uint32_t u32[kNumU32];
+
+ //! \name Utilities
+ //! \{
+
+ inline bool eq(const char* other) const noexcept {
+ return strcmp(str, other) == 0;
+ }
+
+ //! \}
+};
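+
+// For example, `FixedString<8>` stores up to 7 characters plus the null
+// terminator, rounded up to two uint32_t words.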
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_STRING_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/support.cpp b/3rdparty/asmjit/src/asmjit/core/support.cpp
new file mode 100644
index 00000000000..6b7e0854d81
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/support.cpp
@@ -0,0 +1,499 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Support - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+template<typename T>
+static void testArrays(const T* a, const T* b, size_t size) noexcept {
+ for (size_t i = 0; i < size; i++)
+ EXPECT(a[i] == b[i], "Mismatch at %u", unsigned(i));
+}
+
+static void testAlignment() noexcept {
+ INFO("Support::isAligned()");
+ EXPECT(Support::isAligned<size_t>(0xFFFF, 4) == false);
+ EXPECT(Support::isAligned<size_t>(0xFFF4, 4) == true);
+ EXPECT(Support::isAligned<size_t>(0xFFF8, 8) == true);
+ EXPECT(Support::isAligned<size_t>(0xFFF0, 16) == true);
+
+ INFO("Support::alignUp()");
+ EXPECT(Support::alignUp<size_t>(0xFFFF, 4) == 0x10000);
+ EXPECT(Support::alignUp<size_t>(0xFFF4, 4) == 0x0FFF4);
+ EXPECT(Support::alignUp<size_t>(0xFFF8, 8) == 0x0FFF8);
+ EXPECT(Support::alignUp<size_t>(0xFFF0, 16) == 0x0FFF0);
+ EXPECT(Support::alignUp<size_t>(0xFFF0, 32) == 0x10000);
+
+ INFO("Support::alignUpDiff()");
+ EXPECT(Support::alignUpDiff<size_t>(0xFFFF, 4) == 1);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF4, 4) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF8, 8) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 16) == 0);
+ EXPECT(Support::alignUpDiff<size_t>(0xFFF0, 32) == 16);
+
+ INFO("Support::alignUpPowerOf2()");
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0000) == 0x00000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0xFFFF) == 0x10000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0xF123) == 0x10000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0F00) == 0x01000);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x0100) == 0x00100);
+ EXPECT(Support::alignUpPowerOf2<size_t>(0x1001) == 0x02000);
+}
+
+static void testBitUtils() noexcept {
+ uint32_t i;
+
+ INFO("Support::shl() / shr()");
+ EXPECT(Support::shl(int32_t(0x00001111), 16) == int32_t(0x11110000u));
+ EXPECT(Support::shl(uint32_t(0x00001111), 16) == uint32_t(0x11110000u));
+ EXPECT(Support::shr(int32_t(0x11110000u), 16) == int32_t(0x00001111u));
+ EXPECT(Support::shr(uint32_t(0x11110000u), 16) == uint32_t(0x00001111u));
+ EXPECT(Support::sar(int32_t(0xFFFF0000u), 16) == int32_t(0xFFFFFFFFu));
+ EXPECT(Support::sar(uint32_t(0xFFFF0000u), 16) == uint32_t(0xFFFFFFFFu));
+
+ INFO("Support::blsi()");
+ for (i = 0; i < 32; i++) EXPECT(Support::blsi(uint32_t(1) << i) == uint32_t(1) << i);
+ for (i = 0; i < 31; i++) EXPECT(Support::blsi(uint32_t(3) << i) == uint32_t(1) << i);
+ for (i = 0; i < 64; i++) EXPECT(Support::blsi(uint64_t(1) << i) == uint64_t(1) << i);
+ for (i = 0; i < 63; i++) EXPECT(Support::blsi(uint64_t(3) << i) == uint64_t(1) << i);
+
+ INFO("Support::ctz()");
+ for (i = 0; i < 32; i++) EXPECT(Support::ctz(uint32_t(1) << i) == i);
+ for (i = 0; i < 64; i++) EXPECT(Support::ctz(uint64_t(1) << i) == i);
+ for (i = 0; i < 32; i++) EXPECT(Support::constCtz(uint32_t(1) << i) == i);
+ for (i = 0; i < 64; i++) EXPECT(Support::constCtz(uint64_t(1) << i) == i);
+
+ INFO("Support::bitMask()");
+ EXPECT(Support::bitMask(0, 1, 7) == 0x83u);
+ for (i = 0; i < 32; i++)
+ EXPECT(Support::bitMask(i) == (1u << i));
+
+ INFO("Support::bitTest()");
+ for (i = 0; i < 32; i++) {
+ EXPECT(Support::bitTest((1 << i), i) == true, "Support::bitTest(%X, %u) should return true", (1 << i), i);
+ }
+
+ INFO("Support::lsbMask()");
+ for (i = 0; i < 32; i++) {
+ uint32_t expectedBits = 0;
+ for (uint32_t b = 0; b < i; b++)
+ expectedBits |= uint32_t(1) << b;
+ EXPECT(Support::lsbMask<uint32_t>(i) == expectedBits);
+ }
+
+ INFO("Support::popcnt()");
+ for (i = 0; i < 32; i++) EXPECT(Support::popcnt((uint32_t(1) << i)) == 1);
+ for (i = 0; i < 64; i++) EXPECT(Support::popcnt((uint64_t(1) << i)) == 1);
+ EXPECT(Support::popcnt(0x000000F0) == 4);
+ EXPECT(Support::popcnt(0x10101010) == 4);
+ EXPECT(Support::popcnt(0xFF000000) == 8);
+ EXPECT(Support::popcnt(0xFFFFFFF7) == 31);
+ EXPECT(Support::popcnt(0x7FFFFFFF) == 31);
+
+ INFO("Support::isPowerOf2()");
+ for (i = 0; i < 64; i++) {
+ EXPECT(Support::isPowerOf2(uint64_t(1) << i) == true);
+ EXPECT(Support::isPowerOf2((uint64_t(1) << i) ^ 0x001101) == false);
+ }
+}
+
+static void testIntUtils() noexcept {
+ INFO("Support::byteswap()");
+ EXPECT(Support::byteswap32(int32_t(0x01020304)) == int32_t(0x04030201));
+ EXPECT(Support::byteswap32(uint32_t(0x01020304)) == uint32_t(0x04030201));
+
+ INFO("Support::bytepack()");
+ union BytePackData {
+ uint8_t bytes[4];
+ uint32_t u32;
+ } bpdata;
+
+ bpdata.u32 = Support::bytepack32_4x8(0x00, 0x11, 0x22, 0x33);
+ EXPECT(bpdata.bytes[0] == 0x00);
+ EXPECT(bpdata.bytes[1] == 0x11);
+ EXPECT(bpdata.bytes[2] == 0x22);
+ EXPECT(bpdata.bytes[3] == 0x33);
+
+ INFO("Support::isBetween()");
+ EXPECT(Support::isBetween<int>(10 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(11 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(20 , 10, 20) == true);
+ EXPECT(Support::isBetween<int>(9 , 10, 20) == false);
+ EXPECT(Support::isBetween<int>(21 , 10, 20) == false);
+ EXPECT(Support::isBetween<int>(101, 10, 20) == false);
+
+ INFO("Support::isInt8()");
+ EXPECT(Support::isInt8(-128) == true);
+ EXPECT(Support::isInt8( 127) == true);
+ EXPECT(Support::isInt8(-129) == false);
+ EXPECT(Support::isInt8( 128) == false);
+
+ INFO("Support::isInt16()");
+ EXPECT(Support::isInt16(-32768) == true);
+ EXPECT(Support::isInt16( 32767) == true);
+ EXPECT(Support::isInt16(-32769) == false);
+ EXPECT(Support::isInt16( 32768) == false);
+
+ INFO("Support::isInt32()");
+ EXPECT(Support::isInt32( 2147483647 ) == true);
+ EXPECT(Support::isInt32(-2147483647 - 1) == true);
+ EXPECT(Support::isInt32(uint64_t(2147483648u)) == false);
+ EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu)) == false);
+ EXPECT(Support::isInt32(uint64_t(0xFFFFFFFFu) + 1) == false);
+
+ INFO("Support::isUInt8()");
+ EXPECT(Support::isUInt8(0) == true);
+ EXPECT(Support::isUInt8(255) == true);
+ EXPECT(Support::isUInt8(256) == false);
+ EXPECT(Support::isUInt8(-1) == false);
+
+ INFO("Support::isUInt12()");
+ EXPECT(Support::isUInt12(0) == true);
+ EXPECT(Support::isUInt12(4095) == true);
+ EXPECT(Support::isUInt12(4096) == false);
+ EXPECT(Support::isUInt12(-1) == false);
+
+ INFO("Support::isUInt16()");
+ EXPECT(Support::isUInt16(0) == true);
+ EXPECT(Support::isUInt16(65535) == true);
+ EXPECT(Support::isUInt16(65536) == false);
+ EXPECT(Support::isUInt16(-1) == false);
+
+ INFO("Support::isUInt32()");
+ EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF)) == true);
+ EXPECT(Support::isUInt32(uint64_t(0xFFFFFFFF) + 1) == false);
+ EXPECT(Support::isUInt32(-1) == false);
+}
+
+static void testReadWrite() noexcept {
+ INFO("Support::readX() / writeX()");
+
+ uint8_t arr[32] = { 0 };
+
+ Support::writeU16uBE(arr + 1, 0x0102u);
+ Support::writeU16uBE(arr + 3, 0x0304u);
+ EXPECT(Support::readU32uBE(arr + 1) == 0x01020304u);
+ EXPECT(Support::readU32uLE(arr + 1) == 0x04030201u);
+ EXPECT(Support::readU32uBE(arr + 2) == 0x02030400u);
+ EXPECT(Support::readU32uLE(arr + 2) == 0x00040302u);
+
+ Support::writeU32uLE(arr + 5, 0x05060708u);
+ EXPECT(Support::readU64uBE(arr + 1) == 0x0102030408070605u);
+ EXPECT(Support::readU64uLE(arr + 1) == 0x0506070804030201u);
+
+ Support::writeU64uLE(arr + 7, 0x1122334455667788u);
+ EXPECT(Support::readU32uBE(arr + 8) == 0x77665544u);
+}
+
+static void testBitVector() noexcept {
+ INFO("Support::bitVectorOp");
+ {
+ uint32_t vec[3] = { 0 };
+ Support::bitVectorFill(vec, 1, 64);
+ EXPECT(vec[0] == 0xFFFFFFFEu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 1, 1);
+ EXPECT(vec[0] == 0xFFFFFFFCu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorFill(vec, 0, 32);
+ EXPECT(vec[0] == 0xFFFFFFFFu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 0, 32);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorFill(vec, 1, 30);
+ EXPECT(vec[0] == 0x7FFFFFFEu);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x00000001u);
+
+ Support::bitVectorClear(vec, 1, 95);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0x00000000u);
+ EXPECT(vec[2] == 0x00000000u);
+
+ Support::bitVectorFill(vec, 32, 64);
+ EXPECT(vec[0] == 0x00000000u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0xFFFFFFFFu);
+
+ Support::bitVectorSetBit(vec, 1, true);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0xFFFFFFFFu);
+
+ Support::bitVectorSetBit(vec, 95, false);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0xFFFFFFFFu);
+ EXPECT(vec[2] == 0x7FFFFFFFu);
+
+ Support::bitVectorClear(vec, 33, 32);
+ EXPECT(vec[0] == 0x00000002u);
+ EXPECT(vec[1] == 0x00000001u);
+ EXPECT(vec[2] == 0x7FFFFFFEu);
+ }
+
+ INFO("Support::bitVectorIndexOf");
+ {
+ uint32_t vec1[1] = { 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec1, 0, true) == 31);
+ EXPECT(Support::bitVectorIndexOf(vec1, 1, true) == 31);
+ EXPECT(Support::bitVectorIndexOf(vec1, 31, true) == 31);
+
+ uint32_t vec2[2] = { 0x00000000, 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec2, 0, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 1, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 31, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 32, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 33, true) == 63);
+ EXPECT(Support::bitVectorIndexOf(vec2, 63, true) == 63);
+
+ uint32_t vec3[3] = { 0x00000001, 0x00000000, 0x80000000 };
+ EXPECT(Support::bitVectorIndexOf(vec3, 0, true) == 0);
+ EXPECT(Support::bitVectorIndexOf(vec3, 1, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 2, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 31, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 32, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 63, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 64, true) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec3, 95, true) == 95);
+
+ uint32_t vec4[3] = { ~vec3[0], ~vec3[1], ~vec3[2] };
+ EXPECT(Support::bitVectorIndexOf(vec4, 0, false) == 0);
+ EXPECT(Support::bitVectorIndexOf(vec4, 1, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 2, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 31, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 32, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 63, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 64, false) == 95);
+ EXPECT(Support::bitVectorIndexOf(vec4, 95, false) == 95);
+ }
+
+ INFO("Support::BitWordIterator<uint32_t>");
+ {
+ Support::BitWordIterator<uint32_t> it(0x80000F01u);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 0);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 8);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 9);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 10);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 11);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(!it.hasNext());
+
+ // No bits set.
+ it.init(0x00000000u);
+ ASMJIT_ASSERT(!it.hasNext());
+
+ // Only first bit set.
+ it.init(0x00000001u);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 0);
+ ASMJIT_ASSERT(!it.hasNext());
+
+ // Only last bit set (special case).
+ it.init(0x80000000u);
+ ASMJIT_ASSERT(it.hasNext());
+ ASMJIT_ASSERT(it.next() == 31);
+ ASMJIT_ASSERT(!it.hasNext());
+ }
+
+ INFO("Support::BitWordIterator<uint64_t>");
+ {
+ Support::BitWordIterator<uint64_t> it(uint64_t(1) << 63);
+ ASMJIT_ASSERT(it.hasNext());
+ ASMJIT_ASSERT(it.next() == 63);
+ ASMJIT_ASSERT(!it.hasNext());
+ }
+
+ INFO("Support::BitVectorIterator<uint32_t>");
+ {
+ // Border cases.
+ static const uint32_t bitsNone[] = { 0xFFFFFFFFu };
+ Support::BitVectorIterator<uint32_t> it(bitsNone, 0);
+
+ EXPECT(!it.hasNext());
+ it.init(bitsNone, 0, 1);
+ EXPECT(!it.hasNext());
+ it.init(bitsNone, 0, 128);
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits1[] = { 0x80000008u, 0x80000001u, 0x00000000u, 0x80000000u, 0x00000000u, 0x00000000u, 0x00003000u };
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 3);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 32);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 204);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 205);
+ EXPECT(!it.hasNext());
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 4);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 64);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+
+ it.init(bits1, ASMJIT_ARRAY_SIZE(bits1), 127);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+
+ static const uint32_t bits2[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits3[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x00000000u };
+ it.init(bits3, ASMJIT_ARRAY_SIZE(bits3));
+ EXPECT(!it.hasNext());
+
+ static const uint32_t bits4[] = { 0x00000000u, 0x00000000u, 0x00000000u, 0x80000000u };
+ it.init(bits4, ASMJIT_ARRAY_SIZE(bits4));
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 127);
+ EXPECT(!it.hasNext());
+ }
+
+ INFO("Support::BitVectorIterator<uint64_t>");
+ {
+ static const uint64_t bits1[] = { 0x80000000u, 0x80000000u, 0x00000000u, 0x80000000u };
+ Support::BitVectorIterator<uint64_t> it(bits1, ASMJIT_ARRAY_SIZE(bits1));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 31);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 95);
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 223);
+ EXPECT(!it.hasNext());
+
+ static const uint64_t bits2[] = { 0x8000000000000000u, 0, 0, 0 };
+ it.init(bits2, ASMJIT_ARRAY_SIZE(bits2));
+
+ EXPECT(it.hasNext());
+ EXPECT(it.next() == 63);
+ EXPECT(!it.hasNext());
+ }
+}
+
+static void testSorting() noexcept {
+ INFO("Support::qSort() - Testing qsort and isort of predefined arrays");
+ {
+ constexpr size_t kArraySize = 11;
+
+ int ref_[kArraySize] = { -4, -2, -1, 0, 1, 9, 12, 13, 14, 19, 22 };
+ int arr1[kArraySize] = { 0, 1, -1, 19, 22, 14, -4, 9, 12, 13, -2 };
+ int arr2[kArraySize];
+
+ memcpy(arr2, arr1, kArraySize * sizeof(int));
+
+ Support::iSort(arr1, kArraySize);
+ Support::qSort(arr2, kArraySize);
+ testArrays(arr1, ref_, kArraySize);
+ testArrays(arr2, ref_, kArraySize);
+ }
+
+ INFO("Support::qSort() - Testing qsort and isort of artificial arrays");
+ {
+ constexpr size_t kArraySize = 200;
+
+ int arr1[kArraySize];
+ int arr2[kArraySize];
+ int ref_[kArraySize];
+
+ for (size_t size = 2; size < kArraySize; size++) {
+ for (size_t i = 0; i < size; i++) {
+ arr1[i] = int(size - 1 - i);
+ arr2[i] = int(size - 1 - i);
+ ref_[i] = int(i);
+ }
+
+ Support::iSort(arr1, size);
+ Support::qSort(arr2, size);
+ testArrays(arr1, ref_, size);
+ testArrays(arr2, ref_, size);
+ }
+ }
+
+ INFO("Support::qSort() - Testing qsort and isort with an unstable compare function");
+ {
+ constexpr size_t kArraySize = 5;
+
+ float arr1[kArraySize] = { 1.0f, 0.0f, 3.0f, -1.0f, std::numeric_limits<float>::quiet_NaN() };
+ float arr2[kArraySize] = { };
+
+ memcpy(arr2, arr1, kArraySize * sizeof(float));
+
+    // The result is not verified as it's undefined where the NaN ends up.
+ Support::iSort(arr1, kArraySize);
+ Support::qSort(arr2, kArraySize);
+ }
+}
+
+UNIT(support) {
+ testAlignment();
+ testBitUtils();
+ testIntUtils();
+ testReadWrite();
+ testBitVector();
+ testSorting();
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/support.h b/3rdparty/asmjit/src/asmjit/core/support.h
new file mode 100644
index 00000000000..0f49b78f0af
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/support.h
@@ -0,0 +1,1411 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_SUPPORT_H_INCLUDED
+#define ASMJIT_CORE_SUPPORT_H_INCLUDED
+
+#include "../core/globals.h"
+
+#if defined(_MSC_VER)
+ #include <intrin.h>
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_support
+//! \{
+
+//! Contains support classes and functions that may be used by AsmJit source
+//! and header files. Anything defined here is considered internal and should
+//! not be used outside of AsmJit and related projects like AsmTK.
+namespace Support {
+
+// ============================================================================
+// [asmjit::Support - Architecture Features & Constraints]
+// ============================================================================
+
+//! \cond INTERNAL
+static constexpr bool kUnalignedAccess16 = ASMJIT_ARCH_X86 != 0;
+static constexpr bool kUnalignedAccess32 = ASMJIT_ARCH_X86 != 0;
+static constexpr bool kUnalignedAccess64 = ASMJIT_ARCH_X86 != 0;
+//! \endcond
+
+// ============================================================================
+// [asmjit::Support - Internal]
+// ============================================================================
+
+//! \cond INTERNAL
+namespace Internal {
+ template<typename T, size_t Alignment>
+ struct AlignedInt {};
+
+ template<> struct AlignedInt<uint16_t, 1> { typedef uint16_t ASMJIT_ALIGN_TYPE(T, 1); };
+ template<> struct AlignedInt<uint16_t, 2> { typedef uint16_t T; };
+ template<> struct AlignedInt<uint32_t, 1> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 1); };
+ template<> struct AlignedInt<uint32_t, 2> { typedef uint32_t ASMJIT_ALIGN_TYPE(T, 2); };
+ template<> struct AlignedInt<uint32_t, 4> { typedef uint32_t T; };
+ template<> struct AlignedInt<uint64_t, 1> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 1); };
+ template<> struct AlignedInt<uint64_t, 2> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 2); };
+ template<> struct AlignedInt<uint64_t, 4> { typedef uint64_t ASMJIT_ALIGN_TYPE(T, 4); };
+ template<> struct AlignedInt<uint64_t, 8> { typedef uint64_t T; };
+
+ // IntBySize - Make an int-type by size (signed or unsigned) that is the
+ // same as types defined by <stdint.h>.
+ // Int32Or64 - Make an int-type that has at least 32 bits: [u]int[32|64]_t.
+
+ template<size_t SIZE, int IS_SIGNED>
+ struct IntBySize {}; // Fail if not specialized.
+
+ template<> struct IntBySize<1, 0> { typedef uint8_t Type; };
+ template<> struct IntBySize<1, 1> { typedef int8_t Type; };
+ template<> struct IntBySize<2, 0> { typedef uint16_t Type; };
+ template<> struct IntBySize<2, 1> { typedef int16_t Type; };
+ template<> struct IntBySize<4, 0> { typedef uint32_t Type; };
+ template<> struct IntBySize<4, 1> { typedef int32_t Type; };
+ template<> struct IntBySize<8, 0> { typedef uint64_t Type; };
+ template<> struct IntBySize<8, 1> { typedef int64_t Type; };
+
+ template<typename T, int IS_SIGNED = std::is_signed<T>::value>
+ struct Int32Or64 : public IntBySize<sizeof(T) <= 4 ? size_t(4) : sizeof(T), IS_SIGNED> {};
+}
+//! \endcond
+
+// ============================================================================
+// [asmjit::Support - FastUInt8]
+// ============================================================================
+
+#if ASMJIT_ARCH_X86
+typedef uint8_t FastUInt8;
+#else
+typedef unsigned int FastUInt8;
+#endif
+
+// ============================================================================
+// [asmjit::Support - IntBySize / Int32Or64]
+// ============================================================================
+
+//! Casts an integer `x` to either `int32_t` or `int64_t` depending on `T`.
+template<typename T>
+static constexpr typename Internal::Int32Or64<T, 1>::Type asInt(T x) noexcept { return (typename Internal::Int32Or64<T, 1>::Type)x; }
+
+//! Casts an integer `x` to either `uint32_t` or `uint64_t` depending on `T`.
+template<typename T>
+static constexpr typename Internal::Int32Or64<T, 0>::Type asUInt(T x) noexcept { return (typename Internal::Int32Or64<T, 0>::Type)x; }
+
+//! Casts an integer `x` to either `int32_t`, `uint32_t`, `int64_t`, or `uint64_t` depending on `T`.
+template<typename T>
+static constexpr typename Internal::Int32Or64<T>::Type asNormalized(T x) noexcept { return (typename Internal::Int32Or64<T>::Type)x; }
+
+// ============================================================================
+// [asmjit::Support - BitCast]
+// ============================================================================
+
+//! \cond
+namespace Internal {
+ template<typename DstT, typename SrcT>
+ union BitCastUnion {
+ ASMJIT_INLINE BitCastUnion(SrcT src) noexcept : src(src) {}
+ SrcT src;
+ DstT dst;
+ };
+}
+//! \endcond
+
+//! Bit-casts from `Src` type to `Dst` type.
+//!
+//! Useful to bit-cast between integers and floating points.
+template<typename Dst, typename Src>
+static inline Dst bitCast(const Src& x) noexcept { return Internal::BitCastUnion<Dst, Src>(x).dst; }
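+
+// For example, `bitCast<uint32_t>(1.0f)` yields 0x3F800000u - the IEEE-754
+// encoding of 1.0f - without resorting to pointer casts that would violate
+// strict aliasing.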
+
+// ============================================================================
+// [asmjit::Support - BitOps]
+// ============================================================================
+
+//! Storage used to store a pack of bits (should be compatible with a machine word).
+typedef Internal::IntBySize<sizeof(uintptr_t), 0>::Type BitWord;
+
+template<typename T>
+static constexpr uint32_t bitSizeOf() noexcept { return uint32_t(sizeof(T) * 8u); }
+
+//! Number of bits stored in a single `BitWord`.
+static constexpr uint32_t kBitWordSizeInBits = bitSizeOf<BitWord>();
+
+//! Returns `0 - x` in a safe way (no undefined behavior), works for unsigned numbers as well.
+template<typename T>
+static constexpr T neg(const T& x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return T(U(0) - U(x));
+}
+
+template<typename T>
+static constexpr T allOnes() noexcept { return neg<T>(T(1)); }
+
+//! Returns `x << y` (shift left logical) by explicitly casting `x` to an unsigned type and back.
+template<typename X, typename Y>
+static constexpr X shl(const X& x, const Y& y) noexcept {
+ typedef typename std::make_unsigned<X>::type U;
+ return X(U(x) << y);
+}
+
+//! Returns `x >> y` (shift right logical) by explicitly casting `x` to an unsigned type and back.
+template<typename X, typename Y>
+static constexpr X shr(const X& x, const Y& y) noexcept {
+ typedef typename std::make_unsigned<X>::type U;
+ return X(U(x) >> y);
+}
+
+//! Returns `x >> y` (shift right arithmetic) by explicitly casting `x` to a signed type and back.
+template<typename X, typename Y>
+static constexpr X sar(const X& x, const Y& y) noexcept {
+ typedef typename std::make_signed<X>::type S;
+ return X(S(x) >> y);
+}
+
+//! Returns `x | (x >> y)` - a building block used by other bit manipulation utilities.
+template<typename X, typename Y>
+static constexpr X or_shr(const X& x, const Y& y) noexcept { return X(x | shr(x, y)); }
+
+//! Returns `x & -x` - extracts lowest set isolated bit (like BLSI instruction).
+template<typename T>
+static constexpr T blsi(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return T(U(x) & neg(U(x)));
+}
+
+//! Generate a trailing bit-mask that has `n` least significant (trailing) bits set.
+template<typename T, typename CountT>
+static constexpr T lsbMask(CountT n) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return (sizeof(U) < sizeof(uintptr_t))
+ ? T(U((uintptr_t(1) << n) - uintptr_t(1)))
+ // Shifting more bits than the type provides is UNDEFINED BEHAVIOR.
+ // In such case we trash the result by ORing it with a mask that has
+ // all bits set and discards the UNDEFINED RESULT of the shift.
+ : T(((U(1) << n) - U(1u)) | neg(U(n >= CountT(bitSizeOf<T>()))));
+}
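+
+// For example, `lsbMask<uint32_t>(4)` yields 0x0000000Fu. The guarded branch
+// above also makes `lsbMask<uint32_t>(32)` yield 0xFFFFFFFFu instead of
+// invoking an undefined full-width shift.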
+
+//! Tests whether the given value `x` has `n`th bit set.
+template<typename T, typename IndexT>
+static constexpr bool bitTest(T x, IndexT n) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return (U(x) & (U(1) << n)) != 0;
+}
+
+//! Returns a bit-mask that has `x` bit set.
+template<typename T>
+static constexpr uint32_t bitMask(T x) noexcept { return (1u << x); }
+
+//! Returns a bit-mask that has `x` bit set (multiple arguments).
+template<typename T, typename... Args>
+static constexpr uint32_t bitMask(T x, Args... args) noexcept { return bitMask(x) | bitMask(args...); }
+
+//! Converts a boolean value `b` to zero or full mask (all bits set).
+template<typename DstT, typename SrcT>
+static constexpr DstT bitMaskFromBool(SrcT b) noexcept {
+ typedef typename std::make_unsigned<DstT>::type U;
+ return DstT(U(0) - U(b));
+}
+
+//! \cond
+namespace Internal {
+ // Fills all trailing bits right from the first most significant bit set.
+ static constexpr uint8_t fillTrailingBitsImpl(uint8_t x) noexcept { return or_shr(or_shr(or_shr(x, 1), 2), 4); }
+ // Fills all trailing bits right from the first most significant bit set.
+ static constexpr uint16_t fillTrailingBitsImpl(uint16_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8); }
+ // Fills all trailing bits right from the first most significant bit set.
+ static constexpr uint32_t fillTrailingBitsImpl(uint32_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16); }
+ // Fills all trailing bits right from the first most significant bit set.
+ static constexpr uint64_t fillTrailingBitsImpl(uint64_t x) noexcept { return or_shr(or_shr(or_shr(or_shr(or_shr(or_shr(x, 1), 2), 4), 8), 16), 32); }
+}
+//! \endcond
+
+// Fills all trailing bits right from the first most significant bit set.
+template<typename T>
+static constexpr T fillTrailingBits(const T& x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return T(Internal::fillTrailingBitsImpl(U(x)));
+}
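+
+// For example, `fillTrailingBits(0x40u)` yields 0x7Fu - every bit below the
+// most significant set bit becomes set as well.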
+
+// ============================================================================
+// [asmjit::Support - CTZ]
+// ============================================================================
+
+//! \cond
+namespace Internal {
+ static constexpr uint32_t constCtzImpl(uint32_t xAndNegX) noexcept {
+ return 31 - ((xAndNegX & 0x0000FFFFu) ? 16 : 0)
+ - ((xAndNegX & 0x00FF00FFu) ? 8 : 0)
+ - ((xAndNegX & 0x0F0F0F0Fu) ? 4 : 0)
+ - ((xAndNegX & 0x33333333u) ? 2 : 0)
+ - ((xAndNegX & 0x55555555u) ? 1 : 0);
+ }
+
+ static constexpr uint32_t constCtzImpl(uint64_t xAndNegX) noexcept {
+ return 63 - ((xAndNegX & 0x00000000FFFFFFFFu) ? 32 : 0)
+ - ((xAndNegX & 0x0000FFFF0000FFFFu) ? 16 : 0)
+ - ((xAndNegX & 0x00FF00FF00FF00FFu) ? 8 : 0)
+ - ((xAndNegX & 0x0F0F0F0F0F0F0F0Fu) ? 4 : 0)
+ - ((xAndNegX & 0x3333333333333333u) ? 2 : 0)
+ - ((xAndNegX & 0x5555555555555555u) ? 1 : 0);
+ }
+
+ template<typename T>
+ static constexpr uint32_t constCtz(T x) noexcept {
+ return constCtzImpl(x & neg(x));
+ }
+
+ static ASMJIT_INLINE uint32_t ctz(uint32_t x) noexcept {
+ #if defined(__GNUC__)
+ return uint32_t(__builtin_ctz(x));
+ #elif defined(_MSC_VER) && (ASMJIT_ARCH_X86 || ASMJIT_ARCH_ARM)
+ unsigned long i;
+ _BitScanForward(&i, x);
+ return uint32_t(i);
+ #else
+ return constCtz(x);
+ #endif
+ }
+
+ static ASMJIT_INLINE uint32_t ctz(uint64_t x) noexcept {
+ #if defined(__GNUC__)
+ return uint32_t(__builtin_ctzll(x));
+ #elif defined(_MSC_VER) && (ASMJIT_ARCH_X86 == 64 || ASMJIT_ARCH_ARM == 64)
+ unsigned long i;
+ _BitScanForward64(&i, x);
+ return uint32_t(i);
+ #else
+ return constCtz(x);
+ #endif
+ }
+}
+//! \endcond
+
+//! Count trailing zeros in `x` (returns the index of the first set bit in `x`).
+//!
+//! \note The input MUST NOT be zero, otherwise the result is undefined.
+template<typename T>
+static inline uint32_t ctz(T x) noexcept { return Internal::ctz(asUInt(x)); }
+
+//! Count trailing zeros in `x` (constant expression).
+template<typename T>
+static constexpr uint32_t constCtz(T x) noexcept { return Internal::constCtz(asUInt(x)); }
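+
+// For example, `ctz(0x10u)` returns 4. Because `constCtz` is constexpr it can
+// also be evaluated at compile time:
+//
+//   static_assert(Support::constCtz(0x10u) == 4, "ctz of 0x10 must be 4");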
+
+// ============================================================================
+// [asmjit::Support - PopCnt]
+// ============================================================================
+
+// Based on the following resource:
+// http://graphics.stanford.edu/~seander/bithacks.html
+//
+// Alternatively, for a very small number of bits in `x`:
+// uint32_t n = 0;
+// while (x) {
+// x &= x - 1;
+// n++;
+// }
+// return n;
+
+//! \cond
+namespace Internal {
+ static inline uint32_t constPopcntImpl(uint32_t x) noexcept {
+ x = x - ((x >> 1) & 0x55555555u);
+ x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);
+ return (((x + (x >> 4)) & 0x0F0F0F0Fu) * 0x01010101u) >> 24;
+ }
+
+ static inline uint32_t constPopcntImpl(uint64_t x) noexcept {
+ if (ASMJIT_ARCH_BITS >= 64) {
+ x = x - ((x >> 1) & 0x5555555555555555u);
+ x = (x & 0x3333333333333333u) + ((x >> 2) & 0x3333333333333333u);
+ return uint32_t((((x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Fu) * 0x0101010101010101u) >> 56);
+ }
+ else {
+ return constPopcntImpl(uint32_t(x >> 32)) +
+ constPopcntImpl(uint32_t(x & 0xFFFFFFFFu));
+ }
+ }
+
+ static inline uint32_t popcntImpl(uint32_t x) noexcept {
+ #if defined(__GNUC__)
+ return uint32_t(__builtin_popcount(x));
+ #else
+ return constPopcntImpl(asUInt(x));
+ #endif
+ }
+
+ static inline uint32_t popcntImpl(uint64_t x) noexcept {
+ #if defined(__GNUC__)
+ return uint32_t(__builtin_popcountll(x));
+ #else
+ return constPopcntImpl(asUInt(x));
+ #endif
+ }
+}
+//! \endcond
+
+//! Calculates the number of set bits in `x` (population count).
+template<typename T>
+static inline uint32_t popcnt(T x) noexcept { return Internal::popcntImpl(asUInt(x)); }
+
+//! Calculates the number of set bits in `x` without relying on compiler intrinsics.
+template<typename T>
+static inline uint32_t constPopcnt(T x) noexcept { return Internal::constPopcntImpl(asUInt(x)); }
+
+// ============================================================================
+// [asmjit::Support - Min/Max]
+// ============================================================================
+
+// NOTE: These are constexpr `min()` and `max()` implementations that are not
+// exactly the same as `std::min()` and `std::max()`. The return value is not
+// a reference to `a` or `b` but it's a new value instead.
+
+template<typename T>
+static constexpr T min(const T& a, const T& b) noexcept { return b < a ? b : a; }
+
+template<typename T, typename... Args>
+static constexpr T min(const T& a, const T& b, Args&&... args) noexcept { return min(min(a, b), std::forward<Args>(args)...); }
+
+template<typename T>
+static constexpr T max(const T& a, const T& b) noexcept { return a < b ? b : a; }
+
+template<typename T, typename... Args>
+static constexpr T max(const T& a, const T& b, Args&&... args) noexcept { return max(max(a, b), std::forward<Args>(args)...); }
+
+// ============================================================================
+// [asmjit::Support - Overflow Arithmetic]
+// ============================================================================
+
+//! \cond
+namespace Internal {
+ template<typename T>
+ static ASMJIT_INLINE T addOverflowImpl(T x, T y, FastUInt8* of) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ U result = U(x) + U(y);
+ *of = FastUInt8(*of | FastUInt8(std::is_unsigned<T>::value ? result < U(x) : T((U(x) ^ ~U(y)) & (U(x) ^ result)) < 0));
+ return T(result);
+ }
+
+ template<typename T>
+ static ASMJIT_INLINE T subOverflowImpl(T x, T y, FastUInt8* of) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ U result = U(x) - U(y);
+ *of = FastUInt8(*of | FastUInt8(std::is_unsigned<T>::value ? result > U(x) : T((U(x) ^ U(y)) & (U(x) ^ result)) < 0));
+ return T(result);
+ }
+}
+//! \endcond
+
+template<typename T>
+static ASMJIT_INLINE T addOverflow(const T& x, const T& y, FastUInt8* of) noexcept { return T(Internal::addOverflowImpl(x, y, of)); }
+
+template<typename T>
+static ASMJIT_INLINE T subOverflow(const T& x, const T& y, FastUInt8* of) noexcept { return T(Internal::subOverflowImpl(x, y, of)); }
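+
+// A usage sketch (names `a`, `b`, `c` are placeholders) - `of` is only ever
+// ORed into, so a whole chain of operations can be validated by a single
+// check at the end:
+//
+//   FastUInt8 of = 0;
+//   uint32_t x = addOverflow(a, b, &of);
+//   uint32_t y = subOverflow(x, c, &of);
+//   if (of) { /* at least one operation overflowed */ }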
+
+// ============================================================================
+// [asmjit::Support - Alignment]
+// ============================================================================
+
+template<typename X, typename Y>
+static constexpr bool isAligned(X base, Y alignment) noexcept {
+ typedef typename Internal::IntBySize<sizeof(X), 0>::Type U;
+ return ((U)base % (U)alignment) == 0;
+}
+
+//! Tests whether the `x` is a power of two (only one bit is set).
+template<typename T>
+static constexpr bool isPowerOf2(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+ return x && !(U(x) & (U(x) - U(1)));
+}
+
+template<typename X, typename Y>
+static constexpr X alignUp(X x, Y alignment) noexcept {
+ typedef typename Internal::IntBySize<sizeof(X), 0>::Type U;
+ return (X)( ((U)x + ((U)(alignment) - 1u)) & ~((U)(alignment) - 1u) );
+}
+
+template<typename T>
+static constexpr T alignUpPowerOf2(T x) noexcept {
+ typedef typename Internal::IntBySize<sizeof(T), 0>::Type U;
+ return (T)(fillTrailingBits(U(x) - 1u) + 1u);
+}
+
+//! Returns either zero or a positive difference between `base` and `base`
+//! aligned up to `alignment`.
+template<typename X, typename Y>
+static constexpr typename Internal::IntBySize<sizeof(X), 0>::Type alignUpDiff(X base, Y alignment) noexcept {
+ typedef typename Internal::IntBySize<sizeof(X), 0>::Type U;
+ return alignUp(U(base), alignment) - U(base);
+}
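+
+// For example, `alignUpDiff<size_t>(0xFFF0, 32)` yields 16 - the number of
+// bytes needed to reach the next 32-byte boundary (see the unit tests in
+// support.cpp).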
+
+template<typename X, typename Y>
+static constexpr X alignDown(X x, Y alignment) noexcept {
+ typedef typename Internal::IntBySize<sizeof(X), 0>::Type U;
+ return (X)( (U)x & ~((U)(alignment) - 1u) );
+}
+
+// ============================================================================
+// [asmjit::Support - NumGranularized]
+// ============================================================================
+
+//! Calculates the number of `granularity`-sized elements needed to cover
+//! `base` units. This can be used, for example, to calculate the number of
+//! BitWords required to represent N bits.
+template<typename X, typename Y>
+static constexpr X numGranularized(X base, Y granularity) noexcept {
+ typedef typename Internal::IntBySize<sizeof(X), 0>::Type U;
+ return X((U(base) + U(granularity) - 1) / U(granularity));
+}
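+
+// For example, `numGranularized(95, 32)` yields 3 - three 32-bit BitWords are
+// needed to hold 95 bits.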
+
+// ============================================================================
+// [asmjit::Support - IsBetween]
+// ============================================================================
+
+//! Checks whether `x` is greater than or equal to `a` and less than or equal to `b`.
+template<typename T>
+static constexpr bool isBetween(const T& x, const T& a, const T& b) noexcept {
+ return x >= a && x <= b;
+}
+
+// ============================================================================
+// [asmjit::Support - IsInt / IsUInt]
+// ============================================================================
+
+//! Checks whether the given integer `x` can be cast to a 4-bit signed integer.
+template<typename T>
+static constexpr bool isInt4(T x) noexcept {
+ typedef typename std::make_signed<T>::type S;
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? isBetween<S>(S(x), -8, 7)
+ : U(x) <= U(7u);
+}
+
+//! Checks whether the given integer `x` can be cast to an 8-bit signed integer.
+template<typename T>
+static constexpr bool isInt8(T x) noexcept {
+ typedef typename std::make_signed<T>::type S;
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? sizeof(T) <= 1 || isBetween<S>(S(x), -128, 127)
+ : U(x) <= U(127u);
+}
+
+//! Checks whether the given integer `x` can be cast to a 16-bit signed integer.
+template<typename T>
+static constexpr bool isInt16(T x) noexcept {
+ typedef typename std::make_signed<T>::type S;
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? sizeof(T) <= 2 || isBetween<S>(S(x), -32768, 32767)
+ : sizeof(T) <= 1 || U(x) <= U(32767u);
+}
+
+//! Checks whether the given integer `x` can be cast to a 32-bit signed integer.
+template<typename T>
+static constexpr bool isInt32(T x) noexcept {
+ typedef typename std::make_signed<T>::type S;
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? sizeof(T) <= 4 || isBetween<S>(S(x), -2147483647 - 1, 2147483647)
+ : sizeof(T) <= 2 || U(x) <= U(2147483647u);
+}
+
+//! Checks whether the given integer `x` can be cast to a 4-bit unsigned integer.
+template<typename T>
+static constexpr bool isUInt4(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? x >= T(0) && x <= T(15)
+ : U(x) <= U(15u);
+}
+
+//! Checks whether the given integer `x` can be cast to an 8-bit unsigned integer.
+template<typename T>
+static constexpr bool isUInt8(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? (sizeof(T) <= 1 || T(x) <= T(255)) && x >= T(0)
+ : (sizeof(T) <= 1 || U(x) <= U(255u));
+}
+
+//! Checks whether the given integer `x` can be cast to a 12-bit unsigned integer (ARM specific).
+template<typename T>
+static constexpr bool isUInt12(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? (sizeof(T) <= 1 || T(x) <= T(4095)) && x >= T(0)
+ : (sizeof(T) <= 1 || U(x) <= U(4095u));
+}
+
+//! Checks whether the given integer `x` can be cast to a 16-bit unsigned integer.
+template<typename T>
+static constexpr bool isUInt16(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? (sizeof(T) <= 2 || T(x) <= T(65535)) && x >= T(0)
+ : (sizeof(T) <= 2 || U(x) <= U(65535u));
+}
+
+//! Checks whether the given integer `x` can be cast to a 32-bit unsigned integer.
+template<typename T>
+static constexpr bool isUInt32(T x) noexcept {
+ typedef typename std::make_unsigned<T>::type U;
+
+ return std::is_signed<T>::value ? (sizeof(T) <= 4 || T(x) <= T(4294967295u)) && x >= T(0)
+ : (sizeof(T) <= 4 || U(x) <= U(4294967295u));
+}
+
+//! Checks whether the given integer `x` can be cast to either a 32-bit signed or unsigned integer.
+template<typename T>
+static constexpr bool isIntOrUInt32(T x) noexcept {
+ return sizeof(T) <= 4 ? true : (uint32_t(uint64_t(x) >> 32) + 1u) <= 1u;
+}
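+
+// A few illustrative checks:
+//
+//   Support::isInt8(127);                // true
+//   Support::isInt8(128);                // false
+//   Support::isUInt12(4095);             // true (fits an ARM 12-bit immediate)
+//   Support::isInt32(int64_t(1) << 31);  // false (2147483648 > INT32_MAX)
+//   Support::isIntOrUInt32(int64_t(-1)); // true (high 32 bits are all ones)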
+
+// ============================================================================
+// [asmjit::Support - ByteSwap]
+// ============================================================================
+
+static constexpr uint32_t byteswap32(uint32_t x) noexcept {
+ return (x << 24) | (x >> 24) | ((x << 8) & 0x00FF0000u) | ((x >> 8) & 0x0000FF00u);
+}
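+
+// For example, byteswap32(0x11223344u) yields 0x44332211u.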
+
+// ============================================================================
+// [asmjit::Support - BytePack / Unpack]
+// ============================================================================
+
+//! Packs four 8-bit integers into a 32-bit integer as if it were an array of `{b0,b1,b2,b3}`.
+static constexpr uint32_t bytepack32_4x8(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
+ return ASMJIT_ARCH_LE ? (a | (b << 8) | (c << 16) | (d << 24))
+ : (d | (c << 8) | (b << 16) | (a << 24));
+}
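+
+// Regardless of host endianness, storing the result to memory produces the
+// byte sequence {a, b, c, d}. On a little-endian host, for example:
+//
+//   Support::bytepack32_4x8(0x01, 0x02, 0x03, 0x04); // 0x04030201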
+
+template<typename T>
+static constexpr uint32_t unpackU32At0(T x) noexcept { return ASMJIT_ARCH_LE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); }
+template<typename T>
+static constexpr uint32_t unpackU32At1(T x) noexcept { return ASMJIT_ARCH_BE ? uint32_t(uint64_t(x) & 0xFFFFFFFFu) : uint32_t(uint64_t(x) >> 32); }
+
+// ============================================================================
+// [asmjit::Support - Position of byte (in bit-shift)]
+// ============================================================================
+
+static inline uint32_t byteShiftOfDWordStruct(uint32_t index) noexcept {
+ return ASMJIT_ARCH_LE ? index * 8 : (uint32_t(sizeof(uint32_t)) - 1u - index) * 8;
+}
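+
+// Example: extracting the byte at memory position `i` of a 32-bit word `x`
+// independently of host endianness (illustrative):
+//
+//   uint8_t b = uint8_t((x >> Support::byteShiftOfDWordStruct(i)) & 0xFFu);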
+
+// ============================================================================
+// [asmjit::Support - String Utilities]
+// ============================================================================
+
+template<typename T>
+static constexpr T asciiToLower(T c) noexcept { return c ^ (T(c >= T('A') && c <= T('Z')) << 5); }
+
+template<typename T>
+static constexpr T asciiToUpper(T c) noexcept { return c ^ (T(c >= T('a') && c <= T('z')) << 5); }
+
+static ASMJIT_INLINE size_t strLen(const char* s, size_t maxSize) noexcept {
+ size_t i = 0;
+ while (i < maxSize && s[i] != '\0')
+ i++;
+ return i;
+}
+
+static constexpr uint32_t hashRound(uint32_t hash, uint32_t c) noexcept { return hash * 65599 + c; }
+
+//! Gets a hash of the given string `data` of size `size`. Size must be valid
+//! as this function doesn't check for a null terminator and allows it in the
+//! middle of the string.
+static inline uint32_t hashString(const char* data, size_t size) noexcept {
+ uint32_t hashCode = 0;
+ for (size_t i = 0; i < size; i++)
+ hashCode = hashRound(hashCode, uint8_t(data[i]));
+ return hashCode;
+}
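+
+// Illustrative expansion for a two-character string:
+//
+//   hashString("ab", 2) == hashRound(hashRound(0, 'a'), 'b')
+//                       == (0 * 65599 + 97) * 65599 + 98
+//                       == 6363201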
+
+static ASMJIT_INLINE const char* findPackedString(const char* p, uint32_t id) noexcept {
+ uint32_t i = 0;
+ while (i < id) {
+ while (p[0])
+ p++;
+ p++;
+ i++;
+ }
+ return p;
+}
+
+//! Compares two instruction names.
+//!
+//! `a` is a null-terminated instruction name from the arch-specific `nameData[]`
+//! table. `b` is a possibly non-null-terminated instruction name passed to
+//! `InstAPI::stringToInstId()`.
+static ASMJIT_INLINE int cmpInstName(const char* a, const char* b, size_t size) noexcept {
+ for (size_t i = 0; i < size; i++) {
+ int c = int(uint8_t(a[i])) - int(uint8_t(b[i]));
+ if (c != 0) return c;
+ }
+ return int(uint8_t(a[size]));
+}
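+
+// A sketch of the comparison semantics (hypothetical names):
+//
+//   Support::cmpInstName("add", "add" , 3); // == 0 (exact match)
+//   Support::cmpInstName("add", "adds", 4); // <  0 (a[3] is '\0', b[3] is 's')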
+
+// ============================================================================
+// [asmjit::Support - Read / Write]
+// ============================================================================
+
+static inline uint32_t readU8(const void* p) noexcept { return uint32_t(static_cast<const uint8_t*>(p)[0]); }
+static inline int32_t readI8(const void* p) noexcept { return int32_t(static_cast<const int8_t*>(p)[0]); }
+
+template<uint32_t BO, size_t Alignment>
+static inline uint32_t readU16x(const void* p) noexcept {
+ if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
+ typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
+ return uint32_t(static_cast<const U16AlignedToN*>(p)[0]);
+ }
+ else {
+ uint32_t hi = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
+ uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
+ return shl(hi, 8) | lo;
+ }
+}
+
+template<uint32_t BO, size_t Alignment>
+static inline int32_t readI16x(const void* p) noexcept {
+ if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
+ typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
+ return int32_t(int16_t(static_cast<const U16AlignedToN*>(p)[0]));
+ }
+ else {
+ int32_t hi = readI8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 1 : 0));
+ uint32_t lo = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 1));
+ return shl(hi, 8) | int32_t(lo);
+ }
+}
+
+template<uint32_t BO = ByteOrder::kNative>
+static inline uint32_t readU24u(const void* p) noexcept {
+ uint32_t b0 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
+ uint32_t b1 = readU8(static_cast<const uint8_t*>(p) + 1); // Middle byte is the same in either byte order.
+ uint32_t b2 = readU8(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
+ return shl(b0, 16) | shl(b1, 8) | b2;
+}
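+
+// For example, decoding a 3-byte little-endian field (illustrative):
+//
+//   const uint8_t buf[] = { 0x12, 0x34, 0x56 };
+//   Support::readU24u<ByteOrder::kLE>(buf); // 0x563412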
+
+template<uint32_t BO, size_t Alignment>
+static inline uint32_t readU32x(const void* p) noexcept {
+ if (kUnalignedAccess32 || Alignment >= 4) {
+ typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
+ uint32_t x = static_cast<const U32AlignedToN*>(p)[0];
+ return BO == ByteOrder::kNative ? x : byteswap32(x);
+ }
+ else {
+ uint32_t hi = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 2 : 0));
+ uint32_t lo = readU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 2));
+ return shl(hi, 16) | lo;
+ }
+}
+
+template<uint32_t BO, size_t Alignment>
+static inline uint64_t readU64x(const void* p) noexcept {
+ if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
+ typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
+ return static_cast<const U64AlignedToN*>(p)[0];
+ }
+ else {
+ uint32_t hi = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 4 : 0));
+ uint32_t lo = readU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<const uint8_t*>(p) + (BO == ByteOrder::kLE ? 0 : 4));
+ return shl(uint64_t(hi), 32) | lo;
+ }
+}
+
+template<uint32_t BO, size_t Alignment>
+static inline int32_t readI32x(const void* p) noexcept { return int32_t(readU32x<BO, Alignment>(p)); }
+
+template<uint32_t BO, size_t Alignment>
+static inline int64_t readI64x(const void* p) noexcept { return int64_t(readU64x<BO, Alignment>(p)); }
+
+template<size_t Alignment> static inline int32_t readI16xLE(const void* p) noexcept { return readI16x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline int32_t readI16xBE(const void* p) noexcept { return readI16x<ByteOrder::kBE, Alignment>(p); }
+template<size_t Alignment> static inline uint32_t readU16xLE(const void* p) noexcept { return readU16x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline uint32_t readU16xBE(const void* p) noexcept { return readU16x<ByteOrder::kBE, Alignment>(p); }
+template<size_t Alignment> static inline int32_t readI32xLE(const void* p) noexcept { return readI32x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline int32_t readI32xBE(const void* p) noexcept { return readI32x<ByteOrder::kBE, Alignment>(p); }
+template<size_t Alignment> static inline uint32_t readU32xLE(const void* p) noexcept { return readU32x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline uint32_t readU32xBE(const void* p) noexcept { return readU32x<ByteOrder::kBE, Alignment>(p); }
+template<size_t Alignment> static inline int64_t readI64xLE(const void* p) noexcept { return readI64x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline int64_t readI64xBE(const void* p) noexcept { return readI64x<ByteOrder::kBE, Alignment>(p); }
+template<size_t Alignment> static inline uint64_t readU64xLE(const void* p) noexcept { return readU64x<ByteOrder::kLE, Alignment>(p); }
+template<size_t Alignment> static inline uint64_t readU64xBE(const void* p) noexcept { return readU64x<ByteOrder::kBE, Alignment>(p); }
+
+static inline int32_t readI16a(const void* p) noexcept { return readI16x<ByteOrder::kNative, 2>(p); }
+static inline int32_t readI16u(const void* p) noexcept { return readI16x<ByteOrder::kNative, 1>(p); }
+static inline uint32_t readU16a(const void* p) noexcept { return readU16x<ByteOrder::kNative, 2>(p); }
+static inline uint32_t readU16u(const void* p) noexcept { return readU16x<ByteOrder::kNative, 1>(p); }
+
+static inline int32_t readI16aLE(const void* p) noexcept { return readI16xLE<2>(p); }
+static inline int32_t readI16uLE(const void* p) noexcept { return readI16xLE<1>(p); }
+static inline uint32_t readU16aLE(const void* p) noexcept { return readU16xLE<2>(p); }
+static inline uint32_t readU16uLE(const void* p) noexcept { return readU16xLE<1>(p); }
+
+static inline int32_t readI16aBE(const void* p) noexcept { return readI16xBE<2>(p); }
+static inline int32_t readI16uBE(const void* p) noexcept { return readI16xBE<1>(p); }
+static inline uint32_t readU16aBE(const void* p) noexcept { return readU16xBE<2>(p); }
+static inline uint32_t readU16uBE(const void* p) noexcept { return readU16xBE<1>(p); }
+
+static inline uint32_t readU24uLE(const void* p) noexcept { return readU24u<ByteOrder::kLE>(p); }
+static inline uint32_t readU24uBE(const void* p) noexcept { return readU24u<ByteOrder::kBE>(p); }
+
+static inline int32_t readI32a(const void* p) noexcept { return readI32x<ByteOrder::kNative, 4>(p); }
+static inline int32_t readI32u(const void* p) noexcept { return readI32x<ByteOrder::kNative, 1>(p); }
+static inline uint32_t readU32a(const void* p) noexcept { return readU32x<ByteOrder::kNative, 4>(p); }
+static inline uint32_t readU32u(const void* p) noexcept { return readU32x<ByteOrder::kNative, 1>(p); }
+
+static inline int32_t readI32aLE(const void* p) noexcept { return readI32xLE<4>(p); }
+static inline int32_t readI32uLE(const void* p) noexcept { return readI32xLE<1>(p); }
+static inline uint32_t readU32aLE(const void* p) noexcept { return readU32xLE<4>(p); }
+static inline uint32_t readU32uLE(const void* p) noexcept { return readU32xLE<1>(p); }
+
+static inline int32_t readI32aBE(const void* p) noexcept { return readI32xBE<4>(p); }
+static inline int32_t readI32uBE(const void* p) noexcept { return readI32xBE<1>(p); }
+static inline uint32_t readU32aBE(const void* p) noexcept { return readU32xBE<4>(p); }
+static inline uint32_t readU32uBE(const void* p) noexcept { return readU32xBE<1>(p); }
+
+static inline int64_t readI64a(const void* p) noexcept { return readI64x<ByteOrder::kNative, 8>(p); }
+static inline int64_t readI64u(const void* p) noexcept { return readI64x<ByteOrder::kNative, 1>(p); }
+static inline uint64_t readU64a(const void* p) noexcept { return readU64x<ByteOrder::kNative, 8>(p); }
+static inline uint64_t readU64u(const void* p) noexcept { return readU64x<ByteOrder::kNative, 1>(p); }
+
+static inline int64_t readI64aLE(const void* p) noexcept { return readI64xLE<8>(p); }
+static inline int64_t readI64uLE(const void* p) noexcept { return readI64xLE<1>(p); }
+static inline uint64_t readU64aLE(const void* p) noexcept { return readU64xLE<8>(p); }
+static inline uint64_t readU64uLE(const void* p) noexcept { return readU64xLE<1>(p); }
+
+static inline int64_t readI64aBE(const void* p) noexcept { return readI64xBE<8>(p); }
+static inline int64_t readI64uBE(const void* p) noexcept { return readI64xBE<1>(p); }
+static inline uint64_t readU64aBE(const void* p) noexcept { return readU64xBE<8>(p); }
+static inline uint64_t readU64uBE(const void* p) noexcept { return readU64xBE<1>(p); }
+
+static inline void writeU8(void* p, uint32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFFu); }
+static inline void writeI8(void* p, int32_t x) noexcept { static_cast<uint8_t*>(p)[0] = uint8_t(x & 0xFF); }
+
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
+static inline void writeU16x(void* p, uint32_t x) noexcept {
+ if (BO == ByteOrder::kNative && (kUnalignedAccess16 || Alignment >= 2)) {
+ typedef typename Internal::AlignedInt<uint16_t, Alignment>::T U16AlignedToN;
+ static_cast<U16AlignedToN*>(p)[0] = uint16_t(x & 0xFFFFu);
+ }
+ else {
+ static_cast<uint8_t*>(p)[0] = uint8_t((x >> (BO == ByteOrder::kLE ? 0 : 8)) & 0xFFu);
+ static_cast<uint8_t*>(p)[1] = uint8_t((x >> (BO == ByteOrder::kLE ? 8 : 0)) & 0xFFu);
+ }
+}
+
+template<uint32_t BO = ByteOrder::kNative>
+static inline void writeU24u(void* p, uint32_t v) noexcept {
+ static_cast<uint8_t*>(p)[0] = uint8_t((v >> (BO == ByteOrder::kLE ? 0 : 16)) & 0xFFu);
+ static_cast<uint8_t*>(p)[1] = uint8_t((v >> (BO == ByteOrder::kLE ? 8 : 8)) & 0xFFu);
+ static_cast<uint8_t*>(p)[2] = uint8_t((v >> (BO == ByteOrder::kLE ? 16 : 0)) & 0xFFu);
+}
+
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
+static inline void writeU32x(void* p, uint32_t x) noexcept {
+ if (kUnalignedAccess32 || Alignment >= 4) {
+ typedef typename Internal::AlignedInt<uint32_t, Alignment>::T U32AlignedToN;
+ static_cast<U32AlignedToN*>(p)[0] = (BO == ByteOrder::kNative) ? x : Support::byteswap32(x);
+ }
+ else {
+ writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 0, x >> (BO == ByteOrder::kLE ? 0 : 16));
+ writeU16x<BO, Alignment >= 2 ? size_t(2) : Alignment>(static_cast<uint8_t*>(p) + 2, x >> (BO == ByteOrder::kLE ? 16 : 0));
+ }
+}
+
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1>
+static inline void writeU64x(void* p, uint64_t x) noexcept {
+ if (BO == ByteOrder::kNative && (kUnalignedAccess64 || Alignment >= 8)) {
+ typedef typename Internal::AlignedInt<uint64_t, Alignment>::T U64AlignedToN;
+ static_cast<U64AlignedToN*>(p)[0] = x;
+ }
+ else {
+ writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 0, uint32_t((x >> (BO == ByteOrder::kLE ? 0 : 32)) & 0xFFFFFFFFu));
+ writeU32x<BO, Alignment >= 4 ? size_t(4) : Alignment>(static_cast<uint8_t*>(p) + 4, uint32_t((x >> (BO == ByteOrder::kLE ? 32 : 0)) & 0xFFFFFFFFu));
+ }
+}
+
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI16x(void* p, int32_t x) noexcept { writeU16x<BO, Alignment>(p, uint32_t(x)); }
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI32x(void* p, int32_t x) noexcept { writeU32x<BO, Alignment>(p, uint32_t(x)); }
+template<uint32_t BO = ByteOrder::kNative, size_t Alignment = 1> static inline void writeI64x(void* p, int64_t x) noexcept { writeU64x<BO, Alignment>(p, uint64_t(x)); }
+
+template<size_t Alignment = 1> static inline void writeI16xLE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeI16xBE(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kBE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU16xLE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU16xBE(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kBE, Alignment>(p, x); }
+
+template<size_t Alignment = 1> static inline void writeI32xLE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeI32xBE(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kBE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU32xLE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU32xBE(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kBE, Alignment>(p, x); }
+
+template<size_t Alignment = 1> static inline void writeI64xLE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeI64xBE(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kBE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU64xLE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kLE, Alignment>(p, x); }
+template<size_t Alignment = 1> static inline void writeU64xBE(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kBE, Alignment>(p, x); }
+
+static inline void writeI16a(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 2>(p, x); }
+static inline void writeI16u(void* p, int32_t x) noexcept { writeI16x<ByteOrder::kNative, 1>(p, x); }
+static inline void writeU16a(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 2>(p, x); }
+static inline void writeU16u(void* p, uint32_t x) noexcept { writeU16x<ByteOrder::kNative, 1>(p, x); }
+
+static inline void writeI16aLE(void* p, int32_t x) noexcept { writeI16xLE<2>(p, x); }
+static inline void writeI16uLE(void* p, int32_t x) noexcept { writeI16xLE<1>(p, x); }
+static inline void writeU16aLE(void* p, uint32_t x) noexcept { writeU16xLE<2>(p, x); }
+static inline void writeU16uLE(void* p, uint32_t x) noexcept { writeU16xLE<1>(p, x); }
+
+static inline void writeI16aBE(void* p, int32_t x) noexcept { writeI16xBE<2>(p, x); }
+static inline void writeI16uBE(void* p, int32_t x) noexcept { writeI16xBE<1>(p, x); }
+static inline void writeU16aBE(void* p, uint32_t x) noexcept { writeU16xBE<2>(p, x); }
+static inline void writeU16uBE(void* p, uint32_t x) noexcept { writeU16xBE<1>(p, x); }
+
+static inline void writeU24uLE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kLE>(p, v); }
+static inline void writeU24uBE(void* p, uint32_t v) noexcept { writeU24u<ByteOrder::kBE>(p, v); }
+
+static inline void writeI32a(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 4>(p, x); }
+static inline void writeI32u(void* p, int32_t x) noexcept { writeI32x<ByteOrder::kNative, 1>(p, x); }
+static inline void writeU32a(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 4>(p, x); }
+static inline void writeU32u(void* p, uint32_t x) noexcept { writeU32x<ByteOrder::kNative, 1>(p, x); }
+
+static inline void writeI32aLE(void* p, int32_t x) noexcept { writeI32xLE<4>(p, x); }
+static inline void writeI32uLE(void* p, int32_t x) noexcept { writeI32xLE<1>(p, x); }
+static inline void writeU32aLE(void* p, uint32_t x) noexcept { writeU32xLE<4>(p, x); }
+static inline void writeU32uLE(void* p, uint32_t x) noexcept { writeU32xLE<1>(p, x); }
+
+static inline void writeI32aBE(void* p, int32_t x) noexcept { writeI32xBE<4>(p, x); }
+static inline void writeI32uBE(void* p, int32_t x) noexcept { writeI32xBE<1>(p, x); }
+static inline void writeU32aBE(void* p, uint32_t x) noexcept { writeU32xBE<4>(p, x); }
+static inline void writeU32uBE(void* p, uint32_t x) noexcept { writeU32xBE<1>(p, x); }
+
+static inline void writeI64a(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 8>(p, x); }
+static inline void writeI64u(void* p, int64_t x) noexcept { writeI64x<ByteOrder::kNative, 1>(p, x); }
+static inline void writeU64a(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 8>(p, x); }
+static inline void writeU64u(void* p, uint64_t x) noexcept { writeU64x<ByteOrder::kNative, 1>(p, x); }
+
+static inline void writeI64aLE(void* p, int64_t x) noexcept { writeI64xLE<8>(p, x); }
+static inline void writeI64uLE(void* p, int64_t x) noexcept { writeI64xLE<1>(p, x); }
+static inline void writeU64aLE(void* p, uint64_t x) noexcept { writeU64xLE<8>(p, x); }
+static inline void writeU64uLE(void* p, uint64_t x) noexcept { writeU64xLE<1>(p, x); }
+
+static inline void writeI64aBE(void* p, int64_t x) noexcept { writeI64xBE<8>(p, x); }
+static inline void writeI64uBE(void* p, int64_t x) noexcept { writeI64xBE<1>(p, x); }
+static inline void writeU64aBE(void* p, uint64_t x) noexcept { writeU64xBE<8>(p, x); }
+static inline void writeU64uBE(void* p, uint64_t x) noexcept { writeU64xBE<1>(p, x); }
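+
+// The read/write helpers above follow a consistent suffix scheme: `a` means
+// the pointer is aligned to the value size, `u` means possibly unaligned, and
+// an optional LE/BE suffix fixes the byte order (native otherwise). A simple
+// round-trip sketch:
+//
+//   uint8_t buf[4];
+//   Support::writeU32uLE(buf, 0xAABBCCDDu);
+//   Support::readU32uLE(buf); // 0xAABBCCDDu on any host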
+
+// ============================================================================
+// [asmjit::Support - Operators]
+// ============================================================================
+
+struct Set { template<typename T> static inline T op(T x, T y) noexcept { DebugUtils::unused(x); return y; } };
+struct SetNot { template<typename T> static inline T op(T x, T y) noexcept { DebugUtils::unused(x); return ~y; } };
+struct And { template<typename T> static inline T op(T x, T y) noexcept { return x & y; } };
+struct AndNot { template<typename T> static inline T op(T x, T y) noexcept { return x & ~y; } };
+struct NotAnd { template<typename T> static inline T op(T x, T y) noexcept { return ~x & y; } };
+struct Or { template<typename T> static inline T op(T x, T y) noexcept { return x | y; } };
+struct Xor { template<typename T> static inline T op(T x, T y) noexcept { return x ^ y; } };
+struct Add { template<typename T> static inline T op(T x, T y) noexcept { return x + y; } };
+struct Sub { template<typename T> static inline T op(T x, T y) noexcept { return x - y; } };
+struct Min { template<typename T> static inline T op(T x, T y) noexcept { return min<T>(x, y); } };
+struct Max { template<typename T> static inline T op(T x, T y) noexcept { return max<T>(x, y); } };
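+
+// These structs act as operator policies for templates such as
+// `Internal::bitVectorOp` below, where `Or` sets bits and `AndNot` clears
+// them. A direct sketch:
+//
+//   Support::Or::op<uint32_t>(0xF0u, 0x0Fu);     // 0xFFu
+//   Support::AndNot::op<uint32_t>(0xFFu, 0x0Fu); // 0xF0u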
+
+// ============================================================================
+// [asmjit::Support - BitWordIterator]
+// ============================================================================
+
+//! Iterates over each bit in a number which is set to 1.
+//!
+//! Example of use:
+//!
+//! ```
+//! uint32_t bitsToIterate = 0x110F;
+//! Support::BitWordIterator<uint32_t> it(bitsToIterate);
+//!
+//! while (it.hasNext()) {
+//! uint32_t bitIndex = it.next();
+//! std::printf("Bit at %u is set\n", unsigned(bitIndex));
+//! }
+//! ```
+template<typename T>
+class BitWordIterator {
+public:
+ inline explicit BitWordIterator(T bitWord) noexcept
+ : _bitWord(bitWord) {}
+
+ inline void init(T bitWord) noexcept { _bitWord = bitWord; }
+ inline bool hasNext() const noexcept { return _bitWord != 0; }
+
+ inline uint32_t next() noexcept {
+ ASMJIT_ASSERT(_bitWord != 0);
+ uint32_t index = ctz(_bitWord);
+ _bitWord ^= T(1u) << index;
+ return index;
+ }
+
+ T _bitWord;
+};
+
+// ============================================================================
+// [asmjit::Support - BitVectorOps]
+// ============================================================================
+
+//! \cond
+namespace Internal {
+ template<typename T, class OperatorT, class FullWordOpT>
+ static inline void bitVectorOp(T* buf, size_t index, size_t count) noexcept {
+ if (count == 0)
+ return;
+
+ const size_t kTSizeInBits = bitSizeOf<T>();
+ size_t vecIndex = index / kTSizeInBits; // T[]
+ size_t bitIndex = index % kTSizeInBits; // T[][]
+
+ buf += vecIndex;
+
+ // The first BitWord requires special handling to preserve bits outside the fill region.
+ const T kFillMask = allOnes<T>();
+ size_t firstNBits = min<size_t>(kTSizeInBits - bitIndex, count);
+
+ buf[0] = OperatorT::op(buf[0], (kFillMask >> (kTSizeInBits - firstNBits)) << bitIndex);
+ buf++;
+ count -= firstNBits;
+
+ // All bits between the first and last affected BitWords can be just filled.
+ while (count >= kTSizeInBits) {
+ buf[0] = FullWordOpT::op(buf[0], kFillMask);
+ buf++;
+ count -= kTSizeInBits;
+ }
+
+ // The last BitWord requires special handling as well.
+ if (count)
+ buf[0] = OperatorT::op(buf[0], kFillMask >> (kTSizeInBits - count));
+ }
+}
+//! \endcond
+
+//! Returns the value of a bit in a bit-vector `buf` at `index`.
+template<typename T>
+static inline bool bitVectorGetBit(T* buf, size_t index) noexcept {
+ const size_t kTSizeInBits = bitSizeOf<T>();
+
+ size_t vecIndex = index / kTSizeInBits;
+ size_t bitIndex = index % kTSizeInBits;
+
+ return bool((buf[vecIndex] >> bitIndex) & 0x1u);
+}
+
+//! Sets bit in a bit-vector `buf` at `index` to `value`.
+template<typename T>
+static inline void bitVectorSetBit(T* buf, size_t index, bool value) noexcept {
+ const size_t kTSizeInBits = bitSizeOf<T>();
+
+ size_t vecIndex = index / kTSizeInBits;
+ size_t bitIndex = index % kTSizeInBits;
+
+ T bitMask = T(1u) << bitIndex;
+ if (value)
+ buf[vecIndex] |= bitMask;
+ else
+ buf[vecIndex] &= ~bitMask;
+}
+
+//! Flips a bit in a bit-vector `buf` at `index`.
+template<typename T>
+static inline void bitVectorFlipBit(T* buf, size_t index) noexcept {
+ const size_t kTSizeInBits = bitSizeOf<T>();
+
+ size_t vecIndex = index / kTSizeInBits;
+ size_t bitIndex = index % kTSizeInBits;
+
+ T bitMask = T(1u) << bitIndex;
+ buf[vecIndex] ^= bitMask;
+}
+
+//! Fills `count` bits in bit-vector `buf` starting at bit-index `index`.
+template<typename T>
+static inline void bitVectorFill(T* buf, size_t index, size_t count) noexcept { Internal::bitVectorOp<T, Or, Set>(buf, index, count); }
+
+//! Clears `count` bits in bit-vector `buf` starting at bit-index `index`.
+template<typename T>
+static inline void bitVectorClear(T* buf, size_t index, size_t count) noexcept { Internal::bitVectorOp<T, AndNot, SetNot>(buf, index, count); }
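+
+// A sketch over a small bit-vector (two 32-bit BitWords):
+//
+//   uint32_t bits[2] = { 0, 0 };
+//   Support::bitVectorFill(bits, 30, 4);  // Sets bits 30..33 across the word boundary.
+//   Support::bitVectorGetBit(bits, 33);   // true
+//   Support::bitVectorClear(bits, 30, 4); // Back to all zeros.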
+
+template<typename T>
+static inline size_t bitVectorIndexOf(T* buf, size_t start, bool value) noexcept {
+ const size_t kTSizeInBits = bitSizeOf<T>();
+ size_t vecIndex = start / kTSizeInBits; // T[]
+ size_t bitIndex = start % kTSizeInBits; // T[][]
+
+ T* p = buf + vecIndex;
+
+ // We always look for ones; if `value` is `false` we flip all bits before the search.
+ const T kFillMask = allOnes<T>();
+ const T kFlipMask = value ? T(0) : kFillMask;
+
+ // The first BitWord requires special handling as there are some bits we want to ignore.
+ T bits = (*p ^ kFlipMask) & (kFillMask << bitIndex);
+ for (;;) {
+ if (bits)
+ return (size_t)(p - buf) * kTSizeInBits + ctz(bits);
+ bits = *++p ^ kFlipMask;
+ }
+}
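+
+// Note that the search has no end bound, so a matching bit must exist at or
+// after `start`; otherwise the loop would read past the buffer. A sketch:
+//
+//   uint32_t bits[2] = { 0x0u, 0x10u };
+//   Support::bitVectorIndexOf(bits, 0, true); // 36 (bit 4 of the second word)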
+
+// ============================================================================
+// [asmjit::Support - BitVectorIterator]
+// ============================================================================
+
+template<typename T>
+class BitVectorIterator {
+public:
+ ASMJIT_INLINE BitVectorIterator(const T* data, size_t numBitWords, size_t start = 0) noexcept {
+ init(data, numBitWords, start);
+ }
+
+ ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start = 0) noexcept {
+ const T* ptr = data + (start / bitSizeOf<T>());
+ size_t idx = alignDown(start, bitSizeOf<T>());
+ size_t end = numBitWords * bitSizeOf<T>();
+
+ T bitWord = T(0);
+ if (idx < end) {
+ bitWord = *ptr++ & (allOnes<T>() << (start % bitSizeOf<T>()));
+ while (!bitWord && (idx += bitSizeOf<T>()) < end)
+ bitWord = *ptr++;
+ }
+
+ _ptr = ptr;
+ _idx = idx;
+ _end = end;
+ _current = bitWord;
+ }
+
+ ASMJIT_INLINE bool hasNext() const noexcept {
+ return _current != T(0);
+ }
+
+ ASMJIT_INLINE size_t next() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = ctz(bitWord);
+ bitWord ^= T(1u) << bit;
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += bitSizeOf<T>()) < _end)
+ bitWord = *_ptr++;
+
+ _current = bitWord;
+ return n;
+ }
+
+ ASMJIT_INLINE size_t peekNext() const noexcept {
+ ASMJIT_ASSERT(_current != T(0));
+ return _idx + ctz(_current);
+ }
+
+ const T* _ptr;
+ size_t _idx;
+ size_t _end;
+ T _current;
+};
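+
+// Works like `BitWordIterator`, but over an array of BitWords. For example:
+//
+//   uint32_t bits[2] = { 0x00000001u, 0x80000000u };
+//   Support::BitVectorIterator<uint32_t> it(bits, 2);
+//
+//   while (it.hasNext())
+//     std::printf("Bit at %u is set\n", unsigned(it.next())); // Bits 0 and 63.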
+
+// ============================================================================
+// [asmjit::Support - BitVectorOpIterator]
+// ============================================================================
+
+template<typename T, class OperatorT>
+class BitVectorOpIterator {
+public:
+ static constexpr uint32_t kTSizeInBits = bitSizeOf<T>();
+
+ ASMJIT_INLINE BitVectorOpIterator(const T* aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept {
+ init(aData, bData, numBitWords, start);
+ }
+
+ ASMJIT_INLINE void init(const T* aData, const T* bData, size_t numBitWords, size_t start = 0) noexcept {
+ const T* aPtr = aData + (start / bitSizeOf<T>());
+ const T* bPtr = bData + (start / bitSizeOf<T>());
+ size_t idx = alignDown(start, bitSizeOf<T>());
+ size_t end = numBitWords * bitSizeOf<T>();
+
+ T bitWord = T(0);
+ if (idx < end) {
+ bitWord = OperatorT::op(*aPtr++, *bPtr++) & (allOnes<T>() << (start % bitSizeOf<T>()));
+ while (!bitWord && (idx += kTSizeInBits) < end)
+ bitWord = OperatorT::op(*aPtr++, *bPtr++);
+ }
+
+ _aPtr = aPtr;
+ _bPtr = bPtr;
+ _idx = idx;
+ _end = end;
+ _current = bitWord;
+ }
+
+ ASMJIT_INLINE bool hasNext() const noexcept {
+ return _current != T(0);
+ }
+
+ ASMJIT_INLINE size_t next() noexcept {
+ T bitWord = _current;
+ ASMJIT_ASSERT(bitWord != T(0));
+
+ uint32_t bit = ctz(bitWord);
+ bitWord ^= T(1u) << bit;
+
+ size_t n = _idx + bit;
+ while (!bitWord && (_idx += kTSizeInBits) < _end)
+ bitWord = OperatorT::op(*_aPtr++, *_bPtr++);
+
+ _current = bitWord;
+ return n;
+ }
+
+ const T* _aPtr;
+ const T* _bPtr;
+ size_t _idx;
+ size_t _end;
+ T _current;
+};
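+
+// Same iteration protocol as `BitVectorIterator`, except that each BitWord is
+// first combined from two vectors through `OperatorT`. For example, iterating
+// the intersection of two bit-vectors:
+//
+//   uint32_t a[1] = { 0x000000FFu };
+//   uint32_t b[1] = { 0x0000000Fu };
+//   Support::BitVectorOpIterator<uint32_t, Support::And> it(a, b, 1);
+//   // it.next() yields 0, 1, 2 and 3.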
+
+// ============================================================================
+// [asmjit::Support - Sorting]
+// ============================================================================
+
+//! Sort order.
+enum SortOrder : uint32_t {
+ kSortAscending = 0, //!< Ascending.
+ kSortDescending = 1 //!< Descending.
+};
+
+//! A helper class that provides comparison of any user-defined type that
+//! implements `<` and `>` operators (primitive types are supported as well).
+template<uint32_t Order = kSortAscending>
+struct Compare {
+ template<typename A, typename B>
+ inline int operator()(const A& a, const B& b) const noexcept {
+ return Order == kSortAscending ? int(a > b) - int(a < b)
+ : int(a < b) - int(a > b);
+ }
+};
+
+//! Insertion sort.
+template<typename T, typename CompareT = Compare<kSortAscending>>
+static inline void iSort(T* base, size_t size, const CompareT& cmp = CompareT()) noexcept {
+ for (T* pm = base + 1; pm < base + size; pm++)
+ for (T* pl = pm; pl > base && cmp(pl[-1], pl[0]) > 0; pl--)
+ std::swap(pl[-1], pl[0]);
+}
+
+//! \cond
+namespace Internal {
+ //! Quick-sort implementation.
+ template<typename T, class CompareT>
+ struct QSortImpl {
+ static constexpr size_t kStackSize = 64 * 2;
+ static constexpr size_t kISortThreshold = 7;
+
+ // Based on "PDCLib - Public Domain C Library" and rewritten to C++.
+ static void sort(T* base, size_t size, const CompareT& cmp) noexcept {
+ T* end = base + size;
+ T* stack[kStackSize];
+ T** stackptr = stack;
+
+ for (;;) {
+ if ((size_t)(end - base) > kISortThreshold) {
+ // We work from the second element to the last; the first one will be the pivot.
+ T* pi = base + 1;
+ T* pj = end - 1;
+ std::swap(base[(size_t)(end - base) / 2], base[0]);
+
+ if (cmp(*pi , *pj ) > 0) std::swap(*pi , *pj );
+ if (cmp(*base, *pj ) > 0) std::swap(*base, *pj );
+ if (cmp(*pi , *base) > 0) std::swap(*pi , *base);
+
+ // Now we have the median for pivot element, entering main loop.
+ for (;;) {
+ while (pi < pj && cmp(*++pi, *base) < 0) continue; // Move `i` right until `*i >= pivot`.
+ while (pj > base && cmp(*--pj, *base) > 0) continue; // Move `j` left until `*j <= pivot`.
+
+ if (pi > pj) break;
+ std::swap(*pi, *pj);
+ }
+
+ // Move pivot into correct place.
+ std::swap(*base, *pj);
+
+ // Larger subfile base / end to stack, sort smaller.
+ if (pj - base > end - pi) {
+ // Left is larger.
+ *stackptr++ = base;
+ *stackptr++ = pj;
+ base = pi;
+ }
+ else {
+ // Right is larger.
+ *stackptr++ = pi;
+ *stackptr++ = end;
+ end = pj;
+ }
+ ASMJIT_ASSERT(stackptr <= stack + kStackSize);
+ }
+ else {
+ // UB sanitizer doesn't like applying offset to a nullptr base.
+ if (base != end)
+ iSort(base, (size_t)(end - base), cmp);
+
+ if (stackptr == stack)
+ break;
+
+ end = *--stackptr;
+ base = *--stackptr;
+ }
+ }
+ }
+ };
+}
+//! \endcond
+
+//! Quick sort implementation.
+//!
+//! The main reason to provide a custom qsort implementation is that we needed
+//! something that will never throw a `bad_alloc` exception. This implementation
+//! doesn't use dynamic memory allocation.
+template<typename T, class CompareT = Compare<kSortAscending>>
+static inline void qSort(T* base, size_t size, const CompareT& cmp = CompareT()) noexcept {
+ Internal::QSortImpl<T, CompareT>::sort(base, size, cmp);
+}
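+
+// A sketch sorting a plain array without any dynamic memory allocation:
+//
+//   uint32_t v[] = { 3, 1, 2 };
+//   Support::qSort(v, 3);                                                // 1, 2, 3
+//   Support::qSort(v, 3, Support::Compare<Support::kSortDescending>()); // 3, 2, 1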
+
+// ============================================================================
+// [asmjit::Support - Iterators]
+// ============================================================================
+
+template<typename T>
+class Iterator {
+public:
+ constexpr Iterator(T* p) noexcept : _p(p) {}
+ constexpr Iterator(const Iterator& other) noexcept = default;
+
+ inline Iterator& operator=(const Iterator& other) noexcept = default;
+
+ inline Iterator operator+(size_t n) const noexcept { return Iterator(_p + n); }
+ inline Iterator operator-(size_t n) const noexcept { return Iterator(_p - n); }
+
+ inline Iterator& operator+=(size_t n) noexcept { _p += n; return *this; }
+ inline Iterator& operator-=(size_t n) noexcept { _p -= n; return *this; }
+
+ inline Iterator& operator++() noexcept { return operator+=(1); }
+ inline Iterator& operator--() noexcept { return operator-=(1); }
+
+ inline Iterator operator++(int) noexcept { T* prev = _p; operator+=(1); return Iterator(prev); }
+ inline Iterator operator--(int) noexcept { T* prev = _p; operator-=(1); return Iterator(prev); }
+
+ inline bool operator==(const Iterator& other) noexcept { return _p == other._p; }
+ inline bool operator!=(const Iterator& other) noexcept { return _p != other._p; }
+
+ inline T& operator*() const noexcept { return _p[0]; }
+
+ T* _p;
+};
+
+template<typename T>
+class ReverseIterator {
+public:
+ constexpr ReverseIterator(T* p) noexcept : _p(p) {}
+ constexpr ReverseIterator(const ReverseIterator& other) noexcept = default;
+
+ inline ReverseIterator& operator=(const ReverseIterator& other) noexcept = default;
+
+ inline ReverseIterator operator+(size_t n) const noexcept { return ReverseIterator(_p + n); }
+ inline ReverseIterator operator-(size_t n) const noexcept { return ReverseIterator(_p - n); }
+
+ inline ReverseIterator& operator+=(size_t n) noexcept { _p -= n; return *this; }
+ inline ReverseIterator& operator-=(size_t n) noexcept { _p += n; return *this; }
+
+ inline ReverseIterator& operator++() noexcept { return operator+=(1); }
+ inline ReverseIterator& operator--() noexcept { return operator-=(1); }
+
+ inline ReverseIterator operator++(int) noexcept { T* prev = _p; operator+=(1); return ReverseIterator(prev); }
+ inline ReverseIterator operator--(int) noexcept { T* prev = _p; operator-=(1); return ReverseIterator(prev); }
+
+ inline bool operator==(const ReverseIterator& other) noexcept { return _p == other._p; }
+ inline bool operator!=(const ReverseIterator& other) noexcept { return _p != other._p; }
+
+ inline T& operator*() const noexcept { return _p[-1]; }
+
+ T* _p;
+};
+
+// ============================================================================
+// [asmjit::Support::Temporary]
+// ============================================================================
+
+//! Used to pass a temporary buffer to:
+//!
+//! - Containers that use a user-passed buffer as initial storage (they can still grow).
+//! - Zone allocator that would use the temporary buffer as its first block.
+struct Temporary {
+ void* _data;
+ size_t _size;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ constexpr Temporary(const Temporary& other) noexcept = default;
+ constexpr Temporary(void* data, size_t size) noexcept
+ : _data(data),
+ _size(size) {}
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Temporary& operator=(const Temporary& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the data storage.
+ template<typename T = void>
+ constexpr T* data() const noexcept { return static_cast<T*>(_data); }
+ //! Returns the data storage size in bytes.
+ constexpr size_t size() const noexcept { return _size; }
+
+ //! \}
+};
+
+} // {Support}
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_SUPPORT_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/target.cpp b/3rdparty/asmjit/src/asmjit/core/target.cpp
new file mode 100644
index 00000000000..ad120b443a3
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/target.cpp
@@ -0,0 +1,38 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/target.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Target - Construction / Destruction]
+// ============================================================================
+
+Target::Target() noexcept
+ : _targetType(kTargetNone),
+ _codeInfo() {}
+Target::~Target() noexcept {}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/target.h b/3rdparty/asmjit/src/asmjit/core/target.h
new file mode 100644
index 00000000000..f3da0e5de8e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/target.h
@@ -0,0 +1,210 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_TARGET_H_INCLUDED
+#define ASMJIT_CORE_TARGET_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/func.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::CodeInfo]
+// ============================================================================
+
+//! Basic information about code (or a target). It describes the architecture,
+//! code generation mode (or optimization level), and base address.
+class CodeInfo {
+public:
+ //! Architecture information.
+ ArchInfo _archInfo;
+ //! Natural stack alignment (ARCH+OS).
+ uint8_t _stackAlignment;
+ //! Default CDECL calling convention.
+ uint8_t _cdeclCallConv;
+ //! Default STDCALL calling convention.
+ uint8_t _stdCallConv;
+ //! Default FASTCALL calling convention.
+ uint8_t _fastCallConv;
+ //! Base address.
+ uint64_t _baseAddress;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline CodeInfo() noexcept
+ : _archInfo(),
+ _stackAlignment(0),
+ _cdeclCallConv(CallConv::kIdNone),
+ _stdCallConv(CallConv::kIdNone),
+ _fastCallConv(CallConv::kIdNone),
+ _baseAddress(Globals::kNoBaseAddress) {}
+
+ inline explicit CodeInfo(uint32_t archId, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
+ : _archInfo(archId, archMode),
+ _stackAlignment(0),
+ _cdeclCallConv(CallConv::kIdNone),
+ _stdCallConv(CallConv::kIdNone),
+ _fastCallConv(CallConv::kIdNone),
+ _baseAddress(baseAddress) {}
+
+ inline CodeInfo(const CodeInfo& other) noexcept { init(other); }
+
+ inline bool isInitialized() const noexcept {
+ return _archInfo.archId() != ArchInfo::kIdNone;
+ }
+
+ inline void init(const CodeInfo& other) noexcept {
+ *this = other;
+ }
+
+ inline void init(uint32_t archId, uint32_t archMode = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
+ _archInfo.init(archId, archMode);
+ _stackAlignment = 0;
+ _cdeclCallConv = CallConv::kIdNone;
+ _stdCallConv = CallConv::kIdNone;
+ _fastCallConv = CallConv::kIdNone;
+ _baseAddress = baseAddress;
+ }
+
+ inline void reset() noexcept {
+ _archInfo.reset();
+ _stackAlignment = 0;
+ _cdeclCallConv = CallConv::kIdNone;
+ _stdCallConv = CallConv::kIdNone;
+ _fastCallConv = CallConv::kIdNone;
+ _baseAddress = Globals::kNoBaseAddress;
+ }
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline CodeInfo& operator=(const CodeInfo& other) noexcept = default;
+
+ inline bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
+ inline bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the target architecture information, see `ArchInfo`.
+ inline const ArchInfo& archInfo() const noexcept { return _archInfo; }
+
+ //! Returns the target architecture id, see `ArchInfo::Id`.
+ inline uint32_t archId() const noexcept { return _archInfo.archId(); }
+ //! Returns the target architecture sub-type, see `ArchInfo::SubId`.
+ inline uint32_t archSubId() const noexcept { return _archInfo.archSubId(); }
+ //! Returns the native size of the target's architecture GP register.
+ inline uint32_t gpSize() const noexcept { return _archInfo.gpSize(); }
+ //! Returns the number of GP registers of the target's architecture.
+ inline uint32_t gpCount() const noexcept { return _archInfo.gpCount(); }
+
+ //! Returns a natural stack alignment that must be honored (or 0 if not known).
+ inline uint32_t stackAlignment() const noexcept { return _stackAlignment; }
+ //! Sets a natural stack alignment that must be honored.
+ inline void setStackAlignment(uint32_t sa) noexcept { _stackAlignment = uint8_t(sa); }
+
+ inline uint32_t cdeclCallConv() const noexcept { return _cdeclCallConv; }
+ inline void setCdeclCallConv(uint32_t cc) noexcept { _cdeclCallConv = uint8_t(cc); }
+
+ inline uint32_t stdCallConv() const noexcept { return _stdCallConv; }
+ inline void setStdCallConv(uint32_t cc) noexcept { _stdCallConv = uint8_t(cc); }
+
+ inline uint32_t fastCallConv() const noexcept { return _fastCallConv; }
+ inline void setFastCallConv(uint32_t cc) noexcept { _fastCallConv = uint8_t(cc); }
+
+ inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
+ inline uint64_t baseAddress() const noexcept { return _baseAddress; }
+ inline void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
+ inline void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
+
+ //! \}
+};
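+
+// A sketch of typical initialization (assuming an X64 host; illustrative only):
+//
+//   CodeInfo ci(ArchInfo::kIdX64);
+//   ci.setStackAlignment(16);
+//   ci.setBaseAddress(uint64_t(0x1000)); // Only if a fixed base address is required.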
+
+// ============================================================================
+// [asmjit::Target]
+// ============================================================================
+
+//! Target is an abstract class that describes a machine code target.
+class ASMJIT_VIRTAPI Target {
+public:
+ ASMJIT_BASE_CLASS(Target)
+ ASMJIT_NONCOPYABLE(Target)
+
+ //! Target type, see `TargetType`.
+ uint8_t _targetType;
+ //! Reserved for future use.
+ uint8_t _reserved[7];
+ //! Basic information about the Runtime's code.
+ CodeInfo _codeInfo;
+
+ enum TargetType : uint32_t {
+ //! Uninitialized target or unknown target type.
+ kTargetNone = 0,
+ //! JIT target type, see `JitRuntime`.
+ kTargetJit = 1
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a `Target` instance.
+ ASMJIT_API Target() noexcept;
+ //! Destroys the `Target` instance.
+ ASMJIT_API virtual ~Target() noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns CodeInfo of this target.
+ //!
+ //! CodeInfo can be used to set up a CodeHolder in case you plan to generate
+ //! code compatible with, and executable by, this Runtime.
+ inline const CodeInfo& codeInfo() const noexcept { return _codeInfo; }
+
+ //! Returns the target architecture id, see `ArchInfo::Id`.
+ inline uint32_t archId() const noexcept { return _codeInfo.archId(); }
+ //! Returns the target architecture sub-id, see `ArchInfo::SubId`.
+ inline uint32_t archSubId() const noexcept { return _codeInfo.archSubId(); }
+
+ //! Returns the target type, see `TargetType`.
+ inline uint32_t targetType() const noexcept { return _targetType; }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_TARGET_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/type.cpp b/3rdparty/asmjit/src/asmjit/core/type.cpp
new file mode 100644
index 00000000000..67662db5075
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/type.cpp
@@ -0,0 +1,44 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/misc_p.h"
+#include "../core/type.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Type]
+// ============================================================================
+
+const Type::TypeData Type::_typeData = {
+ #define VALUE(X) Type::BaseOfTypeId<X>::kTypeId
+ { ASMJIT_LOOKUP_TABLE_256(VALUE, 0) },
+ #undef VALUE
+
+ #define VALUE(X) Type::SizeOfTypeId<X>::kTypeSize
+ { ASMJIT_LOOKUP_TABLE_256(VALUE, 0) }
+ #undef VALUE
+};
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/type.h b/3rdparty/asmjit/src/asmjit/core/type.h
new file mode 100644
index 00000000000..59a17e92625
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/type.h
@@ -0,0 +1,398 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_TYPE_H_INCLUDED
+#define ASMJIT_CORE_TYPE_H_INCLUDED
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_core
+//! \{
+
+// ============================================================================
+// [asmjit::Type]
+// ============================================================================
+
+//! Provides a minimal type-system that is used by \ref asmjit_func and \ref asmjit_compiler.
+namespace Type {
+
+//! TypeId.
+//!
+//! This is additional information that can be used to describe a value-type
+//! of a physical or virtual register. It's used mostly by BaseCompiler to
+//! describe register representation (the group of data stored in the register
+//! and the width used), and it's also used by APIs that allow describing and
+//! working with function signatures.
+enum Id : uint32_t {
+ kIdVoid = 0,
+
+ _kIdBaseStart = 32,
+ _kIdBaseEnd = 44,
+
+ _kIdIntStart = 32,
+ _kIdIntEnd = 41,
+
+ kIdIntPtr = 32,
+ kIdUIntPtr = 33,
+
+ kIdI8 = 34,
+ kIdU8 = 35,
+ kIdI16 = 36,
+ kIdU16 = 37,
+ kIdI32 = 38,
+ kIdU32 = 39,
+ kIdI64 = 40,
+ kIdU64 = 41,
+
+ _kIdFloatStart = 42,
+ _kIdFloatEnd = 44,
+
+ kIdF32 = 42,
+ kIdF64 = 43,
+ kIdF80 = 44,
+
+ _kIdMaskStart = 45,
+ _kIdMaskEnd = 48,
+
+ kIdMask8 = 45,
+ kIdMask16 = 46,
+ kIdMask32 = 47,
+ kIdMask64 = 48,
+
+ _kIdMmxStart = 49,
+ _kIdMmxEnd = 50,
+
+ kIdMmx32 = 49,
+ kIdMmx64 = 50,
+
+ _kIdVec32Start = 51,
+ _kIdVec32End = 60,
+
+ kIdI8x4 = 51,
+ kIdU8x4 = 52,
+ kIdI16x2 = 53,
+ kIdU16x2 = 54,
+ kIdI32x1 = 55,
+ kIdU32x1 = 56,
+ kIdF32x1 = 59,
+
+ _kIdVec64Start = 61,
+ _kIdVec64End = 70,
+
+ kIdI8x8 = 61,
+ kIdU8x8 = 62,
+ kIdI16x4 = 63,
+ kIdU16x4 = 64,
+ kIdI32x2 = 65,
+ kIdU32x2 = 66,
+ kIdI64x1 = 67,
+ kIdU64x1 = 68,
+ kIdF32x2 = 69,
+ kIdF64x1 = 70,
+
+ _kIdVec128Start = 71,
+ _kIdVec128End = 80,
+
+ kIdI8x16 = 71,
+ kIdU8x16 = 72,
+ kIdI16x8 = 73,
+ kIdU16x8 = 74,
+ kIdI32x4 = 75,
+ kIdU32x4 = 76,
+ kIdI64x2 = 77,
+ kIdU64x2 = 78,
+ kIdF32x4 = 79,
+ kIdF64x2 = 80,
+
+ _kIdVec256Start = 81,
+ _kIdVec256End = 90,
+
+ kIdI8x32 = 81,
+ kIdU8x32 = 82,
+ kIdI16x16 = 83,
+ kIdU16x16 = 84,
+ kIdI32x8 = 85,
+ kIdU32x8 = 86,
+ kIdI64x4 = 87,
+ kIdU64x4 = 88,
+ kIdF32x8 = 89,
+ kIdF64x4 = 90,
+
+ _kIdVec512Start = 91,
+ _kIdVec512End = 100,
+
+ kIdI8x64 = 91,
+ kIdU8x64 = 92,
+ kIdI16x32 = 93,
+ kIdU16x32 = 94,
+ kIdI32x16 = 95,
+ kIdU32x16 = 96,
+ kIdI64x8 = 97,
+ kIdU64x8 = 98,
+ kIdF32x16 = 99,
+ kIdF64x8 = 100,
+
+ kIdCount = 101,
+ kIdMax = 255
+};
+
+struct TypeData {
+ uint8_t baseOf[kIdMax + 1];
+ uint8_t sizeOf[kIdMax + 1];
+};
+ASMJIT_VARAPI const TypeData _typeData;
+
+static constexpr bool isVoid(uint32_t typeId) noexcept { return typeId == 0; }
+static constexpr bool isValid(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdVec512End; }
+static constexpr bool isBase(uint32_t typeId) noexcept { return typeId >= _kIdBaseStart && typeId <= _kIdBaseEnd; }
+static constexpr bool isAbstract(uint32_t typeId) noexcept { return typeId >= kIdIntPtr && typeId <= kIdUIntPtr; }
+
+static constexpr bool isInt(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdIntEnd; }
+static constexpr bool isInt8(uint32_t typeId) noexcept { return typeId == kIdI8; }
+static constexpr bool isUInt8(uint32_t typeId) noexcept { return typeId == kIdU8; }
+static constexpr bool isInt16(uint32_t typeId) noexcept { return typeId == kIdI16; }
+static constexpr bool isUInt16(uint32_t typeId) noexcept { return typeId == kIdU16; }
+static constexpr bool isInt32(uint32_t typeId) noexcept { return typeId == kIdI32; }
+static constexpr bool isUInt32(uint32_t typeId) noexcept { return typeId == kIdU32; }
+static constexpr bool isInt64(uint32_t typeId) noexcept { return typeId == kIdI64; }
+static constexpr bool isUInt64(uint32_t typeId) noexcept { return typeId == kIdU64; }
+
+static constexpr bool isGp8(uint32_t typeId) noexcept { return typeId >= kIdI8 && typeId <= kIdU8; }
+static constexpr bool isGp16(uint32_t typeId) noexcept { return typeId >= kIdI16 && typeId <= kIdU16; }
+static constexpr bool isGp32(uint32_t typeId) noexcept { return typeId >= kIdI32 && typeId <= kIdU32; }
+static constexpr bool isGp64(uint32_t typeId) noexcept { return typeId >= kIdI64 && typeId <= kIdU64; }
+
+static constexpr bool isFloat(uint32_t typeId) noexcept { return typeId >= _kIdFloatStart && typeId <= _kIdFloatEnd; }
+static constexpr bool isFloat32(uint32_t typeId) noexcept { return typeId == kIdF32; }
+static constexpr bool isFloat64(uint32_t typeId) noexcept { return typeId == kIdF64; }
+static constexpr bool isFloat80(uint32_t typeId) noexcept { return typeId == kIdF80; }
+
+static constexpr bool isMask(uint32_t typeId) noexcept { return typeId >= _kIdMaskStart && typeId <= _kIdMaskEnd; }
+static constexpr bool isMask8(uint32_t typeId) noexcept { return typeId == kIdMask8; }
+static constexpr bool isMask16(uint32_t typeId) noexcept { return typeId == kIdMask16; }
+static constexpr bool isMask32(uint32_t typeId) noexcept { return typeId == kIdMask32; }
+static constexpr bool isMask64(uint32_t typeId) noexcept { return typeId == kIdMask64; }
+
+static constexpr bool isMmx(uint32_t typeId) noexcept { return typeId >= _kIdMmxStart && typeId <= _kIdMmxEnd; }
+static constexpr bool isMmx32(uint32_t typeId) noexcept { return typeId == kIdMmx32; }
+static constexpr bool isMmx64(uint32_t typeId) noexcept { return typeId == kIdMmx64; }
+
+static constexpr bool isVec(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec512End; }
+static constexpr bool isVec32(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec32End; }
+static constexpr bool isVec64(uint32_t typeId) noexcept { return typeId >= _kIdVec64Start && typeId <= _kIdVec64End; }
+static constexpr bool isVec128(uint32_t typeId) noexcept { return typeId >= _kIdVec128Start && typeId <= _kIdVec128End; }
+static constexpr bool isVec256(uint32_t typeId) noexcept { return typeId >= _kIdVec256Start && typeId <= _kIdVec256End; }
+static constexpr bool isVec512(uint32_t typeId) noexcept { return typeId >= _kIdVec512Start && typeId <= _kIdVec512End; }
+
+//! The IdOfT<> template maps a C++ type `T` to its TypeId.
+template<typename T> struct IdOfT { /* Fail if not specialized. */ };
+
+//! \cond
+template<typename T> struct IdOfT<T*> {
+ enum : uint32_t { kTypeId = kIdUIntPtr };
+};
+
+template<typename T> struct IdOfT<T&> {
+ enum : uint32_t { kTypeId = kIdUIntPtr };
+};
+
+template<typename T>
+struct IdOfIntT {
+ static constexpr uint32_t kTypeId =
+ sizeof(T) == 1 ? (std::is_signed<T>::value ? kIdI8 : kIdU8 ) :
+ sizeof(T) == 2 ? (std::is_signed<T>::value ? kIdI16 : kIdU16) :
+ sizeof(T) == 4 ? (std::is_signed<T>::value ? kIdI32 : kIdU32) :
+ sizeof(T) == 8 ? (std::is_signed<T>::value ? kIdI64 : kIdU64) : kIdVoid;
+};
+
+template<uint32_t TYPE_ID>
+struct BaseOfTypeId {
+ static constexpr uint32_t kTypeId =
+ isBase (TYPE_ID) ? TYPE_ID :
+ isMask8 (TYPE_ID) ? kIdU8 :
+ isMask16(TYPE_ID) ? kIdU16 :
+ isMask32(TYPE_ID) ? kIdU32 :
+ isMask64(TYPE_ID) ? kIdU64 :
+ isMmx32 (TYPE_ID) ? kIdI32 :
+ isMmx64 (TYPE_ID) ? kIdI64 :
+ isVec32 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec32Start :
+ isVec64 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec64Start :
+ isVec128(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec128Start :
+ isVec256(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec256Start :
+ isVec512(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec512Start : 0;
+};
+
+template<uint32_t TYPE_ID>
+struct SizeOfTypeId {
+ static constexpr uint32_t kTypeSize =
+ isInt8 (TYPE_ID) ? 1 :
+ isUInt8 (TYPE_ID) ? 1 :
+ isInt16 (TYPE_ID) ? 2 :
+ isUInt16 (TYPE_ID) ? 2 :
+ isInt32 (TYPE_ID) ? 4 :
+ isUInt32 (TYPE_ID) ? 4 :
+ isInt64 (TYPE_ID) ? 8 :
+ isUInt64 (TYPE_ID) ? 8 :
+ isFloat32(TYPE_ID) ? 4 :
+ isFloat64(TYPE_ID) ? 8 :
+ isFloat80(TYPE_ID) ? 10 :
+ isMask8 (TYPE_ID) ? 1 :
+ isMask16 (TYPE_ID) ? 2 :
+ isMask32 (TYPE_ID) ? 4 :
+ isMask64 (TYPE_ID) ? 8 :
+ isMmx32 (TYPE_ID) ? 4 :
+ isMmx64 (TYPE_ID) ? 8 :
+ isVec32 (TYPE_ID) ? 4 :
+ isVec64 (TYPE_ID) ? 8 :
+ isVec128 (TYPE_ID) ? 16 :
+ isVec256 (TYPE_ID) ? 32 :
+ isVec512 (TYPE_ID) ? 64 : 0;
+};
+//! \endcond
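+
+// Illustrative compile-time checks (not part of the original source) showing
+// what the templates above compute; `kIdF32x4` (the 128-bit float vector id)
+// sits at `_kIdVec128Start + 8`, so its base is `kIdI8 + 8`, i.e. `kIdF32`:
+//
+//   static_assert(BaseOfTypeId<kIdF32x4>::kTypeId == kIdF32, "base of F32x4");
+//   static_assert(SizeOfTypeId<kIdF32x4>::kTypeSize == 16, "size of F32x4");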
+
+static inline uint32_t baseOf(uint32_t typeId) noexcept {
+ ASMJIT_ASSERT(typeId <= kIdMax);
+ return _typeData.baseOf[typeId];
+}
+
+static inline uint32_t sizeOf(uint32_t typeId) noexcept {
+ ASMJIT_ASSERT(typeId <= kIdMax);
+ return _typeData.sizeOf[typeId];
+}
+
+//! Returns the offset needed to convert an abstract `kIdIntPtr` or
+//! `kIdUIntPtr` TypeId into a concrete one that matches `gpSize`
+//! (general-purpose register size). Adding the offset to an abstract
+//! TypeId de-abstracts it.
+//!
+//! For example:
+//!
+//! ```
+//! uint32_t gpSize = 4; // Or 8, depending on the target architecture.
+//! uint32_t deabstractDelta = Type::deabstractDeltaOfSize(gpSize);
+//!
+//! uint32_t typeId = ...; // Some TypeId, possibly abstract.
+//!
+//! // Normalize `typeId` into a non-abstract TypeId.
+//! if (Type::isAbstract(typeId)) typeId += deabstractDelta;
+//!
+//! // The same, but using the Type::deabstract() function.
+//! typeId = Type::deabstract(typeId, deabstractDelta);
+//! ```
+static constexpr uint32_t deabstractDeltaOfSize(uint32_t gpSize) noexcept {
+ return gpSize >= 8 ? kIdI64 - kIdIntPtr : kIdI32 - kIdIntPtr;
+}
+
+static constexpr uint32_t deabstract(uint32_t typeId, uint32_t deabstractDelta) noexcept {
+ return isAbstract(typeId) ? typeId + deabstractDelta : typeId;
+}
+
+//! bool as C++ type-name.
+struct Bool {};
+//! int8_t as C++ type-name.
+struct I8 {};
+//! uint8_t as C++ type-name.
+struct U8 {};
+//! int16_t as C++ type-name.
+struct I16 {};
+//! uint16_t as C++ type-name.
+struct U16 {};
+//! int32_t as C++ type-name.
+struct I32 {};
+//! uint32_t as C++ type-name.
+struct U32 {};
+//! int64_t as C++ type-name.
+struct I64 {};
+//! uint64_t as C++ type-name.
+struct U64 {};
+//! intptr_t as C++ type-name.
+struct IPtr {};
+//! uintptr_t as C++ type-name.
+struct UPtr {};
+//! float as C++ type-name.
+struct F32 {};
+//! double as C++ type-name.
+struct F64 {};
+
+} // {Type}
+
+// ============================================================================
+// [ASMJIT_DEFINE_TYPE_ID]
+// ============================================================================
+
+//! \cond
+#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
+namespace Type { \
+ template<> \
+ struct IdOfT<T> { \
+ enum : uint32_t { kTypeId = TYPE_ID }; \
+ }; \
+}
+
+ASMJIT_DEFINE_TYPE_ID(bool , IdOfIntT<bool >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(char , IdOfIntT<char >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(signed char , IdOfIntT<signed char >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(unsigned char , IdOfIntT<unsigned char >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(short , IdOfIntT<short >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(unsigned short , IdOfIntT<unsigned short >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(int , IdOfIntT<int >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(unsigned int , IdOfIntT<unsigned int >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(long , IdOfIntT<long >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(unsigned long , IdOfIntT<unsigned long >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(long long , IdOfIntT<long long >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(unsigned long long, IdOfIntT<unsigned long long>::kTypeId);
+
+#if ASMJIT_CXX_HAS_NATIVE_WCHAR_T
+ASMJIT_DEFINE_TYPE_ID(wchar_t , IdOfIntT<wchar_t >::kTypeId);
+#endif
+
+#if ASMJIT_CXX_HAS_UNICODE_LITERALS
+ASMJIT_DEFINE_TYPE_ID(char16_t , IdOfIntT<char16_t >::kTypeId);
+ASMJIT_DEFINE_TYPE_ID(char32_t , IdOfIntT<char32_t >::kTypeId);
+#endif
+
+ASMJIT_DEFINE_TYPE_ID(void , kIdVoid);
+ASMJIT_DEFINE_TYPE_ID(float , kIdF32);
+ASMJIT_DEFINE_TYPE_ID(double , kIdF64);
+
+ASMJIT_DEFINE_TYPE_ID(Bool , kIdU8);
+ASMJIT_DEFINE_TYPE_ID(I8 , kIdI8);
+ASMJIT_DEFINE_TYPE_ID(U8 , kIdU8);
+ASMJIT_DEFINE_TYPE_ID(I16 , kIdI16);
+ASMJIT_DEFINE_TYPE_ID(U16 , kIdU16);
+ASMJIT_DEFINE_TYPE_ID(I32 , kIdI32);
+ASMJIT_DEFINE_TYPE_ID(U32 , kIdU32);
+ASMJIT_DEFINE_TYPE_ID(I64 , kIdI64);
+ASMJIT_DEFINE_TYPE_ID(U64 , kIdU64);
+ASMJIT_DEFINE_TYPE_ID(IPtr , kIdIntPtr);
+ASMJIT_DEFINE_TYPE_ID(UPtr , kIdUIntPtr);
+ASMJIT_DEFINE_TYPE_ID(F32 , kIdF32);
+ASMJIT_DEFINE_TYPE_ID(F64 , kIdF64);
+//! \endcond
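+
+// A usage sketch (illustration only, not part of the original source): with
+// the specializations above a TypeId can be queried at compile time, e.g. on
+// platforms where `int` is 32 bits:
+//
+//   static_assert(Type::IdOfT<int>::kTypeId == Type::kIdI32, "int maps to I32");
+//   static_assert(Type::IdOfT<void*>::kTypeId == Type::kIdUIntPtr, "pointers map to UIntPtr");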
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_TYPE_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/virtmem.cpp b/3rdparty/asmjit/src/asmjit/core/virtmem.cpp
new file mode 100644
index 00000000000..97f7ceb5b2b
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/virtmem.cpp
@@ -0,0 +1,589 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/osutils.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/virtmem.h"
+
+#if !defined(_WIN32)
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ // Linux has a `memfd_create` syscall that we would like to use, if available.
+ #if defined(__linux__)
+ #include <sys/syscall.h>
+ #endif
+
+  // Apple recently introduced the MAP_JIT flag, which we want to use.
+ #if defined(__APPLE__)
+ #include <TargetConditionals.h>
+ #if TARGET_OS_OSX
+ #include <sys/utsname.h>
+ #endif
+    // Older SDKs don't define `MAP_JIT`.
+ #ifndef MAP_JIT
+ #define MAP_JIT 0x800
+ #endif
+ #endif
+
+ // BSD/OSX: `MAP_ANONYMOUS` is not defined, `MAP_ANON` is.
+ #if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+#endif
+
+#include <atomic>
+
+#if defined(__APPLE__)
+ #define ASMJIT_VM_SHM_DETECT 0
+#else
+ #define ASMJIT_VM_SHM_DETECT 1
+#endif
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::VirtMem - Utilities]
+// ============================================================================
+
+static const uint32_t VirtMem_dualMappingFilter[2] = {
+ VirtMem::kAccessWrite,
+ VirtMem::kAccessExecute
+};
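+
+// For example, with `flags = kAccessWrite | kAccessExecute` the filter yields
+// two views: index 0 masks out write (a read+execute view) and index 1 masks
+// out execute (a read+write view).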
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Windows]]
+// ============================================================================
+
+#if defined(_WIN32)
+struct ScopedHandle {
+ inline ScopedHandle() noexcept
+ : value(nullptr) {}
+
+ inline ~ScopedHandle() noexcept {
+ if (value != nullptr)
+ ::CloseHandle(value);
+ }
+
+ HANDLE value;
+};
+
+static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
+ SYSTEM_INFO systemInfo;
+
+ ::GetSystemInfo(&systemInfo);
+ vmInfo.pageSize = Support::alignUpPowerOf2<uint32_t>(systemInfo.dwPageSize);
+ vmInfo.pageGranularity = systemInfo.dwAllocationGranularity;
+}
+
+// Windows specific implementation that uses `VirtualAlloc` and `VirtualFree`.
+static DWORD VirtMem_accessToWinProtectFlags(uint32_t flags) noexcept {
+ DWORD protectFlags;
+
+ // READ|WRITE|EXECUTE.
+ if (flags & VirtMem::kAccessExecute)
+ protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
+ else if (flags & VirtMem::kAccessReadWrite)
+ protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
+ else
+ protectFlags = PAGE_NOACCESS;
+
+ // Any other flags to consider?
+ return protectFlags;
+}
+
+static DWORD VirtMem_accessToWinDesiredAccess(uint32_t flags) noexcept {
+ DWORD access = (flags & VirtMem::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
+ if (flags & VirtMem::kAccessExecute)
+ access |= FILE_MAP_EXECUTE;
+ return access;
+}
+
+Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
+ *p = nullptr;
+ if (size == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ DWORD protectFlags = VirtMem_accessToWinProtectFlags(flags);
+ void* result = ::VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, protectFlags);
+
+ if (!result)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ *p = result;
+ return kErrorOk;
+}
+
+Error VirtMem::release(void* p, size_t size) noexcept {
+ DebugUtils::unused(size);
+ if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, MEM_RELEASE)))
+ return DebugUtils::errored(kErrorInvalidArgument);
+ return kErrorOk;
+}
+
+Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
+ DWORD protectFlags = VirtMem_accessToWinProtectFlags(flags);
+ DWORD oldFlags;
+
+ if (::VirtualProtect(p, size, protectFlags, &oldFlags))
+ return kErrorOk;
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+
+ if (size == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ ScopedHandle handle;
+ handle.value = ::CreateFileMappingW(
+ INVALID_HANDLE_VALUE,
+ nullptr,
+ PAGE_EXECUTE_READWRITE,
+ (DWORD)(uint64_t(size) >> 32),
+ (DWORD)(size & 0xFFFFFFFFu),
+ nullptr);
+
+ if (ASMJIT_UNLIKELY(!handle.value))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ void* ptr[2];
+ for (uint32_t i = 0; i < 2; i++) {
+ DWORD desiredAccess = VirtMem_accessToWinDesiredAccess(flags & ~VirtMem_dualMappingFilter[i]);
+ ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
+
+    if (ptr[i] == nullptr) {
+      // Only the first view can leak here - unmap it if the second one failed.
+      if (i == 1)
+        ::UnmapViewOfFile(ptr[0]);
+      return DebugUtils::errored(kErrorOutOfMemory);
+    }
+ }
+
+ dm->ro = ptr[0];
+ dm->rw = ptr[1];
+ return kErrorOk;
+}
+
+Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
+ DebugUtils::unused(size);
+ bool failed = false;
+
+ if (!::UnmapViewOfFile(dm->ro))
+ failed = true;
+
+ if (dm->ro != dm->rw && !UnmapViewOfFile(dm->rw))
+ failed = true;
+
+ if (failed)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+ return kErrorOk;
+}
+#endif
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Posix]]
+// ============================================================================
+
+#if !defined(_WIN32)
+struct ScopedFD {
+ inline ScopedFD() noexcept
+ : value(-1) {}
+
+ inline ~ScopedFD() noexcept {
+ if (value != -1)
+ close(value);
+ }
+
+ int value;
+};
+
+static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
+ uint32_t pageSize = uint32_t(::getpagesize());
+
+ vmInfo.pageSize = pageSize;
+ vmInfo.pageGranularity = Support::max<uint32_t>(pageSize, 65536);
+}
+
+// Some operating systems don't allow /dev/shm to be executable. On Linux this
+// happens when /dev/shm is mounted with 'noexec', which is enforced by systemd.
+// Other operating systems restrict executable permissions of shared memory in
+// a similar way, so we run a detection at runtime before trying to allocate
+// the memory requested by the user. Sometimes the detection is unnecessary,
+// because we know it would always result in 'kShmStrategyTmpDir'.
+enum ShmStrategy : uint32_t {
+ kShmStrategyUnknown = 0,
+ kShmStrategyDevShm = 1,
+ kShmStrategyTmpDir = 2
+};
+
+// Posix specific implementation that uses `mmap()` and `munmap()`.
+static int VirtMem_accessToPosixProtection(uint32_t flags) noexcept {
+ int protection = 0;
+ if (flags & VirtMem::kAccessRead ) protection |= PROT_READ;
+ if (flags & VirtMem::kAccessWrite ) protection |= PROT_READ | PROT_WRITE;
+ if (flags & VirtMem::kAccessExecute) protection |= PROT_READ | PROT_EXEC;
+ return protection;
+}
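+
+// For example, `kAccessWrite` alone maps to PROT_READ | PROT_WRITE and
+// `kAccessExecute` alone maps to PROT_READ | PROT_EXEC - write and execute
+// imply read access, matching the flag documentation in virtmem.h.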
+
+// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
+static Error VirtMem_makeErrorFromErrno(int e) noexcept {
+ switch (e) {
+ case EACCES:
+ case EAGAIN:
+ case ENODEV:
+ case EPERM:
+ return kErrorInvalidState;
+
+ case EFBIG:
+ case ENOMEM:
+ case EOVERFLOW:
+ return kErrorOutOfMemory;
+
+ case EMFILE:
+ case ENFILE:
+ return kErrorTooManyHandles;
+
+ default:
+ return kErrorInvalidArgument;
+ }
+}
+
+#if defined(__APPLE__)
+// Detects whether the current process is hardened, which means that pages that
+// have WRITE and EXECUTABLE flags cannot be allocated without MAP_JIT flag.
+static ASMJIT_INLINE bool VirtMem_isHardened() noexcept {
+ static volatile uint32_t globalHardenedFlag;
+
+ enum HardenedFlag : uint32_t {
+ kHardenedFlagUnknown = 0,
+ kHardenedFlagDisabled = 1,
+ kHardenedFlagEnabled = 2
+ };
+
+ uint32_t flag = globalHardenedFlag;
+ if (flag == kHardenedFlagUnknown) {
+ VirtMem::Info memInfo;
+ VirtMem_getInfo(memInfo);
+
+ void* ptr = mmap(nullptr, memInfo.pageSize, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ptr == MAP_FAILED) {
+ flag = kHardenedFlagEnabled;
+ }
+ else {
+ flag = kHardenedFlagDisabled;
+ munmap(ptr, memInfo.pageSize);
+ }
+ globalHardenedFlag = flag;
+ }
+
+ return flag == kHardenedFlagEnabled;
+}
+
+// The MAP_JIT flag, required to run unsigned JIT code, is only supported by
+// macOS 10.14+ (Mojave, Darwin kernel 18+) and iOS.
+static ASMJIT_INLINE bool VirtMem_hasMapJitSupport() noexcept {
+#if TARGET_OS_OSX
+ static volatile uint32_t globalVersion;
+
+ uint32_t ver = globalVersion;
+ if (!ver) {
+ struct utsname osname;
+ uname(&osname);
+ ver = atoi(osname.release);
+ globalVersion = ver;
+ }
+ return ver >= 18;
+#else
+ // Assume it's available.
+ return true;
+#endif
+}
+
+static ASMJIT_INLINE uint32_t VirtMem_appleSpecificMMapFlags(uint32_t flags) noexcept {
+  // Always use the MAP_JIT flag if the user asked for it (useful for testing
+  // in non-hardened processes) and detect whether it must be used when the
+  // process is actually hardened (in that case relying on the user's `flags`
+  // makes no sense).
+  bool useMapJit = ((flags & VirtMem::kMMapEnableMapJit) != 0) || VirtMem_isHardened();
+ if (useMapJit)
+ return VirtMem_hasMapJitSupport() ? MAP_JIT : 0u;
+ else
+ return 0;
+}
+#else
+static ASMJIT_INLINE uint32_t VirtMem_appleSpecificMMapFlags(uint32_t flags) noexcept {
+ DebugUtils::unused(flags);
+ return 0;
+}
+#endif
+
+static const char* VirtMem_getTmpDir() noexcept {
+ const char* tmpDir = getenv("TMPDIR");
+ return tmpDir ? tmpDir : "/tmp";
+}
+
+static Error VirtMem_openAnonymousMemory(int* fd, bool preferTmpOverDevShm) noexcept {
+#if defined(SYS_memfd_create)
+  // Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means
+  // it's not available and we will never call it again (it would be pointless).
+
+  // Zero-initialized; if this ever changes to '1' the syscall is not available
+  // and we must use `shm_open()` and `shm_unlink()` instead.
+ static volatile uint32_t memfd_create_not_supported;
+
+ if (!memfd_create_not_supported) {
+ *fd = (int)syscall(SYS_memfd_create, "vmem", 0);
+ if (ASMJIT_LIKELY(*fd >= 0))
+ return kErrorOk;
+
+ int e = errno;
+ if (e == ENOSYS)
+ memfd_create_not_supported = 1;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+#endif
+
+#if defined(SHM_ANON)
+ // Originally FreeBSD extension, apparently works in other BSDs too.
+ DebugUtils::unused(preferTmpOverDevShm);
+ *fd = shm_open(SHM_ANON, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+
+ if (ASMJIT_LIKELY(*fd >= 0))
+ return kErrorOk;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));
+#else
+  // POSIX API. We have to somehow generate a unique name. This is nothing
+  // cryptographic - we just use a few bits of a stack address to get a
+  // different base per thread (each thread has its own stack) and retry on
+  // collision. `shm_open()` is called with flags that require creating the
+  // file, so we never open an existing shared memory object.
+ static std::atomic<uint32_t> internalCounter;
+
+ StringTmp<128> uniqueName;
+ const char* kShmFormat = "/shm-id-%08llX";
+
+ uint32_t kRetryCount = 100;
+ uint64_t bits = ((uintptr_t)(void*)&uniqueName) & 0x55555555u;
+
+ for (uint32_t i = 0; i < kRetryCount; i++) {
+ bits -= uint64_t(OSUtils::getTickCount()) * 773703683;
+ bits = ((bits >> 14) ^ (bits << 6)) + uint64_t(++internalCounter) * 10619863;
+
+ if (!ASMJIT_VM_SHM_DETECT || preferTmpOverDevShm) {
+ uniqueName.assignString(VirtMem_getTmpDir());
+ uniqueName.appendFormat(kShmFormat, (unsigned long long)bits);
+ *fd = open(uniqueName.data(), O_RDWR | O_CREAT | O_EXCL, 0);
+ if (ASMJIT_LIKELY(*fd >= 0)) {
+ unlink(uniqueName.data());
+ return kErrorOk;
+ }
+ }
+ else {
+ uniqueName.assignFormat(kShmFormat, (unsigned long long)bits);
+ *fd = shm_open(uniqueName.data(), O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
+ if (ASMJIT_LIKELY(*fd >= 0)) {
+ shm_unlink(uniqueName.data());
+ return kErrorOk;
+ }
+ }
+
+ int e = errno;
+ if (e == EEXIST)
+ continue;
+ else
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+
+  // Every retry collided with an existing name, so `*fd` is not a valid file
+  // descriptor here - returning success would hand the caller a bogus handle.
+  return DebugUtils::errored(kErrorInvalidState);
+#endif
+}
+
+#if ASMJIT_VM_SHM_DETECT
+static Error VirtMem_detectShmStrategy(uint32_t* strategyOut) noexcept {
+ ScopedFD fd;
+ VirtMem::Info vmInfo = VirtMem::info();
+
+ ASMJIT_PROPAGATE(VirtMem_openAnonymousMemory(&fd.value, false));
+ if (ftruncate(fd.value, off_t(vmInfo.pageSize)) != 0)
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));
+
+ void* ptr = mmap(nullptr, vmInfo.pageSize, PROT_READ | PROT_EXEC, MAP_SHARED, fd.value, 0);
+ if (ptr == MAP_FAILED) {
+ int e = errno;
+ if (e == EINVAL) {
+ *strategyOut = kShmStrategyTmpDir;
+ return kErrorOk;
+ }
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+ else {
+ munmap(ptr, vmInfo.pageSize);
+ *strategyOut = kShmStrategyDevShm;
+ return kErrorOk;
+ }
+}
+#endif
+
+#if ASMJIT_VM_SHM_DETECT
+static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
+ // Initially don't assume anything. It has to be tested whether
+ // '/dev/shm' was mounted with 'noexec' flag or not.
+ static volatile uint32_t globalShmStrategy = kShmStrategyUnknown;
+
+ uint32_t strategy = globalShmStrategy;
+ if (strategy == kShmStrategyUnknown) {
+ ASMJIT_PROPAGATE(VirtMem_detectShmStrategy(&strategy));
+ globalShmStrategy = strategy;
+ }
+
+ *strategyOut = strategy;
+ return kErrorOk;
+}
+#else
+static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
+ *strategyOut = kShmStrategyTmpDir;
+ return kErrorOk;
+}
+#endif
+
+Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
+ *p = nullptr;
+
+ if (size == 0)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ int protection = VirtMem_accessToPosixProtection(flags);
+ int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | VirtMem_appleSpecificMMapFlags(flags);
+ void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0);
+
+ if (ptr == MAP_FAILED)
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ *p = ptr;
+ return kErrorOk;
+}
+
+Error VirtMem::release(void* p, size_t size) noexcept {
+ if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ return kErrorOk;
+}
+
+Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
+ int protection = VirtMem_accessToPosixProtection(flags);
+ if (mprotect(p, size, protection) == 0)
+ return kErrorOk;
+
+ return DebugUtils::errored(kErrorInvalidArgument);
+}
+
+Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+
+ if (off_t(size) <= 0)
+ return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
+
+ bool preferTmpOverDevShm = (flags & kMappingPreferTmp) != 0;
+ if (!preferTmpOverDevShm) {
+ uint32_t strategy;
+ ASMJIT_PROPAGATE(VirtMem_getShmStrategy(&strategy));
+ preferTmpOverDevShm = (strategy == kShmStrategyTmpDir);
+ }
+
+ // ScopedFD will automatically close the file descriptor in its destructor.
+ ScopedFD fd;
+ ASMJIT_PROPAGATE(VirtMem_openAnonymousMemory(&fd.value, preferTmpOverDevShm));
+ if (ftruncate(fd.value, off_t(size)) != 0)
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(errno));
+
+ void* ptr[2];
+ for (uint32_t i = 0; i < 2; i++) {
+ ptr[i] = mmap(nullptr, size, VirtMem_accessToPosixProtection(flags & ~VirtMem_dualMappingFilter[i]), MAP_SHARED, fd.value, 0);
+ if (ptr[i] == MAP_FAILED) {
+ // Get the error now before `munmap` has a chance to clobber it.
+ int e = errno;
+ if (i == 1)
+ munmap(ptr[0], size);
+ return DebugUtils::errored(VirtMem_makeErrorFromErrno(e));
+ }
+ }
+
+ dm->ro = ptr[0];
+ dm->rw = ptr[1];
+ return kErrorOk;
+}
+
+Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
+ Error err = release(dm->ro, size);
+ if (dm->ro != dm->rw)
+ err |= release(dm->rw, size);
+
+ if (err)
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ dm->ro = nullptr;
+ dm->rw = nullptr;
+ return kErrorOk;
+}
+#endif
+
+// ============================================================================
+// [asmjit::VirtMem - Virtual Memory [Memory Info]]
+// ============================================================================
+
+VirtMem::Info VirtMem::info() noexcept {
+ static VirtMem::Info vmInfo;
+ static std::atomic<uint32_t> vmInfoInitialized;
+
+ if (!vmInfoInitialized.load()) {
+ VirtMem::Info localMemInfo;
+ VirtMem_getInfo(localMemInfo);
+
+ vmInfo = localMemInfo;
+ vmInfoInitialized.store(1u);
+ }
+
+ return vmInfo;
+}
+
+ASMJIT_END_NAMESPACE
+
+#endif
diff --git a/3rdparty/asmjit/src/asmjit/core/virtmem.h b/3rdparty/asmjit/src/asmjit/core/virtmem.h
new file mode 100644
index 00000000000..a37005d947e
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/virtmem.h
@@ -0,0 +1,145 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_VIRTMEM_H_INCLUDED
+#define ASMJIT_CORE_VIRTMEM_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_JIT
+
+#include "../core/globals.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_jit
+//! \{
+
+// ============================================================================
+// [asmjit::VirtMem]
+// ============================================================================
+
+//! Virtual memory management.
+namespace VirtMem {
+
+//! Virtual memory and memory mapping flags.
+enum Flags : uint32_t {
+ //! No access flags.
+ kAccessNone = 0x00000000u,
+ //! Memory is readable.
+ kAccessRead = 0x00000001u,
+ //! Memory is writable (implies read access).
+ kAccessWrite = 0x00000002u,
+ //! Memory is executable (implies read access).
+ kAccessExecute = 0x00000004u,
+
+  //! A combination of `kAccessRead | kAccessWrite`.
+ kAccessReadWrite = 0x00000003u,
+
+  //! Use the `MAP_JIT` flag available on Apple platforms (macOS Mojave+), which
+ //! allows JIT code to be executed in OSX bundles. This flag is not turned
+ //! on by default, because when a process uses `fork()` the child process
+ //! has no access to the pages mapped with `MAP_JIT`, which could break code
+ //! that doesn't expect this behavior.
+ kMMapEnableMapJit = 0x00000010u,
+
+  //! Not an access flag; only used by `allocDualMapping()` to override the
+  //! default allocation strategy and always use a 'tmp' directory instead of
+  //! "/dev/shm" (on POSIX platforms). Note that this flag is ignored if the
+  //! operating system allows executable memory to be allocated by an API
+  //! other than `open()` or `shm_open()`. For example, on Linux
+  //! `memfd_create()` is preferred, and on BSDs `shm_open(SHM_ANON, ...)` is
+  //! used if `SHM_ANON` is defined.
+  kMappingPreferTmp = 0x80000000u
+};
+
+//! Virtual memory information.
+struct Info {
+ //! Virtual memory page size.
+ uint32_t pageSize;
+ //! Virtual memory page granularity.
+ uint32_t pageGranularity;
+};
+
+//! Dual memory mapping used to map anonymous memory into two regions, where
+//! one region is read-only but executable, and the second region is
+//! read+write but not executable. See \ref VirtMem::allocDualMapping()
+//! for more details.
+struct DualMapping {
+ //! Pointer to data with 'Read' or 'Read+Execute' access.
+ void* ro;
+ //! Pointer to data with 'Read-Write' access, but never 'Write+Execute'.
+ void* rw;
+};
+
+//! Returns virtual memory information, see `VirtMem::Info` for more details.
+ASMJIT_API Info info() noexcept;
+
+//! Allocates virtual memory by either using `VirtualAlloc()` (Windows)
+//! or `mmap()` (POSIX).
+//!
+//! \note `size` should be aligned to the page size; use \ref VirtMem::info()
+//! to obtain it. An invalid size is not corrected by the implementation, and
+//! the allocation will simply fail in that case.
+ASMJIT_API Error alloc(void** p, size_t size, uint32_t flags) noexcept;
+
+//! Releases virtual memory previously allocated by \ref VirtMem::alloc() or
+//! \ref VirtMem::allocDualMapping().
+//!
+//! \note The size must be the same as the one used by \ref VirtMem::alloc().
+//! A mismatched size fails on any POSIX system, but passes on Windows, where
+//! the implementation ignores the size and releases the whole allocation.
+ASMJIT_API Error release(void* p, size_t size) noexcept;
+
+//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect`
+//! (Windows).
+ASMJIT_API Error protect(void* p, size_t size, uint32_t flags) noexcept;
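+
+// A usage sketch (illustration only, not part of the original header):
+// allocate one page as read+write, fill it, flip it to read+execute, then
+// release it. Error handling is abbreviated.
+//
+//   size_t pageSize = VirtMem::info().pageSize;
+//   void* p;
+//   if (VirtMem::alloc(&p, pageSize, VirtMem::kAccessReadWrite) == kErrorOk) {
+//     // ... emit machine code into `p` ...
+//     VirtMem::protect(p, pageSize, VirtMem::kAccessRead | VirtMem::kAccessExecute);
+//     // ... execute the code ...
+//     VirtMem::release(p, pageSize);
+//   }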
+
+//! Allocates virtual memory and creates two views of it, where the first view
+//! has no write access. This is an addition to the API that should be used
+//! in cases where the operating system enforces a W^X security policy, or the
+//! application adopts this policy by default to improve security and prevent
+//! accidental (or deliberate) self-modifying code.
+//!
+//! The pointers returned in `dm` are two independent mappings of the same
+//! shared memory region. You must use \ref VirtMem::releaseDualMapping() to
+//! release it when it's no longer needed. Never use `VirtMem::release()` to
+//! release the memory returned by `allocDualMapping()` as that would fail on
+//! Windows.
+//!
+//! \remarks Both pointers in `dm` are set to `nullptr` if the function fails.
+ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept;
+
+//! Releases the virtual memory mapping previously allocated by
+//! \ref VirtMem::allocDualMapping().
+//!
+//! \remarks Both pointers in `dm` are set to `nullptr` if the function succeeds.
+ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
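+
+// A usage sketch (illustration only, not part of the original header);
+// `code` and `codeSize` are hypothetical. Both views alias the same pages,
+// so bytes written through `rw` become visible (and executable) through `ro`:
+//
+//   VirtMem::DualMapping dm;
+//   if (VirtMem::allocDualMapping(&dm, 4096,
+//       VirtMem::kAccessReadWrite | VirtMem::kAccessExecute) == kErrorOk) {
+//     memcpy(dm.rw, code, codeSize);  // Write through the RW view.
+//     // ... cast `dm.ro` to a function pointer and execute ...
+//     VirtMem::releaseDualMapping(&dm, 4096);
+//   }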
+
+} // VirtMem
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif
+#endif // ASMJIT_CORE_VIRTMEM_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zone.cpp b/3rdparty/asmjit/src/asmjit/core/zone.cpp
new file mode 100644
index 00000000000..16de89becec
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zone.cpp
@@ -0,0 +1,382 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::Zone - Statics]
+// ============================================================================
+
+// Zero size block used by `Zone` that doesn't have any memory allocated.
+// Should be allocated in read-only memory and should never be modified.
+const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
+
+// ============================================================================
+// [asmjit::Zone - Init / Reset]
+// ============================================================================
+
+void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
+ ASMJIT_ASSERT(blockSize >= kMinBlockSize);
+ ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
+ ASMJIT_ASSERT(blockAlignment <= 64);
+
+ // Just to make the compiler happy...
+ constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
+ constexpr size_t kBlockAlignmentShiftMask = 0x7u;
+
+ _assignZeroBlock();
+ _blockSize = blockSize & kBlockSizeMask;
+ _isTemporary = temporary != nullptr;
+ _blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;
+
+ // Setup the first [temporary] block, if necessary.
+ if (temporary) {
+ Block* block = temporary->data<Block>();
+ block->prev = nullptr;
+ block->next = nullptr;
+
+ ASMJIT_ASSERT(temporary->size() >= kBlockSize);
+ block->size = temporary->size() - kBlockSize;
+
+ _assignBlock(block);
+ }
+}
+
+void Zone::reset(uint32_t resetPolicy) noexcept {
+ Block* cur = _block;
+
+ // Can't be altered.
+ if (cur == &_zeroBlock)
+ return;
+
+ if (resetPolicy == Globals::kResetHard) {
+ Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
+ _ptr = initial->data();
+ _end = initial->data();
+ _block = initial;
+
+ // Since cur can be in the middle of the double-linked list, we have to
+ // traverse both directions (`prev` and `next`) separately to visit all.
+ Block* next = cur->next;
+ do {
+ Block* prev = cur->prev;
+
+      // If this is the first block and this Zone is temporary (ZoneTmp), the
+      // block is statically allocated embedded storage. We cannot free it,
+      // and it makes sense to keep it even on a hard reset.
+ if (prev == nullptr && _isTemporary) {
+ cur->prev = nullptr;
+ cur->next = nullptr;
+ _assignBlock(cur);
+ break;
+ }
+
+ ::free(cur);
+ cur = prev;
+ } while (cur);
+
+ cur = next;
+ while (cur) {
+ next = cur->next;
+ ::free(cur);
+ cur = next;
+ }
+ }
+ else {
+ while (cur->prev)
+ cur = cur->prev;
+ _assignBlock(cur);
+ }
+}
+
+// ============================================================================
+// [asmjit::Zone - Alloc]
+// ============================================================================
+
+void* Zone::_alloc(size_t size, size_t alignment) noexcept {
+ Block* curBlock = _block;
+ Block* next = curBlock->next;
+
+ size_t rawBlockAlignment = blockAlignment();
+ size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);
+
+ // If the `Zone` has been cleared the current block doesn't have to be the
+ // last one. Check if there is a block that can be used instead of allocating
+ // a new one. If there is a `next` block it's completely unused, we don't have
+ // to check for remaining bytes in that case.
+ if (next) {
+ uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
+ uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
+
+    // Guard against `ptr` overshooting `end` when the block is smaller than
+    // the requested alignment.
+    if (ptr <= end && size <= (size_t)(end - ptr)) {
+      _block = next;
+      _ptr = ptr + size;
+      _end = end;
+ return static_cast<void*>(ptr);
+ }
+ }
+
+ size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
+ size_t newSize = Support::max(blockSize(), size);
+
+ // Prevent arithmetic overflow.
+ if (ASMJIT_UNLIKELY(newSize > std::numeric_limits<size_t>::max() - kBlockSize - blockAlignmentOverhead))
+ return nullptr;
+
+  // Allocate a new block - we add the alignment overhead to `newSize`, which
+  // becomes the new block size, and we also add `kBlockSize` to the allocation
+  // as it covers the members of the `Zone::Block` structure itself.
+ newSize += blockAlignmentOverhead;
+ Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));
+
+ if (ASMJIT_UNLIKELY(!newBlock))
+ return nullptr;
+
+ // Align the pointer to `minimumAlignment` and adjust the size of this block
+ // accordingly. It's the same as using `minimumAlignment - Support::alignUpDiff()`,
+ // just written differently.
+ {
+ newBlock->prev = nullptr;
+ newBlock->next = nullptr;
+ newBlock->size = newSize;
+
+ if (curBlock != &_zeroBlock) {
+ newBlock->prev = curBlock;
+ curBlock->next = newBlock;
+
+      // This only happens if there is a next block, but the requested memory
+      // can't fit into it. In that case a new block is allocated and inserted
+      // between the current block and the next one.
+ if (next) {
+ newBlock->next = next;
+ next->prev = newBlock;
+ }
+ }
+
+ uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
+ uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);
+
+ _ptr = ptr + size;
+ _end = end;
+ _block = newBlock;
+
+ ASMJIT_ASSERT(_ptr <= _end);
+ return static_cast<void*>(ptr);
+ }
+}
+
+void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
+ void* p = alloc(size, alignment);
+ if (ASMJIT_UNLIKELY(!p))
+ return p;
+ return memset(p, 0, size);
+}
+
+void* Zone::dup(const void* data, size_t size, bool nullTerminate) noexcept {
+ if (ASMJIT_UNLIKELY(!data || !size))
+ return nullptr;
+
+ ASMJIT_ASSERT(size != std::numeric_limits<size_t>::max());
+ uint8_t* m = allocT<uint8_t>(size + nullTerminate);
+ if (ASMJIT_UNLIKELY(!m)) return nullptr;
+
+ memcpy(m, data, size);
+ if (nullTerminate) m[size] = '\0';
+
+ return static_cast<void*>(m);
+}
+
+char* Zone::sformat(const char* fmt, ...) noexcept {
+ if (ASMJIT_UNLIKELY(!fmt))
+ return nullptr;
+
+ char buf[512];
+ size_t size;
+ va_list ap;
+
+ va_start(ap, fmt);
+  size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
+  va_end(ap);
+
+  // `vsnprintf()` returns the length the output would have had, which exceeds
+  // the buffer capacity when truncated - clamp before writing the terminator.
+  size = Support::min<size_t>(size, ASMJIT_ARRAY_SIZE(buf) - 1);
+  buf[size++] = 0;
+ return static_cast<char*>(dup(buf, size));
+}
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Helpers]
+// ============================================================================
+
+#if defined(ASMJIT_BUILD_DEBUG)
+static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
+ ZoneAllocator::DynamicBlock* cur = self->_dynamicBlocks;
+ while (cur) {
+ if (cur == block)
+ return true;
+ cur = cur->next;
+ }
+ return false;
+}
+#endif
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Init / Reset]
+// ============================================================================
+
+void ZoneAllocator::reset(Zone* zone) noexcept {
+ // Free dynamic blocks.
+ DynamicBlock* block = _dynamicBlocks;
+ while (block) {
+ DynamicBlock* next = block->next;
+ ::free(block);
+ block = next;
+ }
+
+ // Zero the entire class and initialize to the given `zone`.
+ memset(this, 0, sizeof(*this));
+ _zone = zone;
+}
+
+// ============================================================================
+// [asmjit::ZoneAllocator - Alloc / Release]
+// ============================================================================
+
+void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+
+ // Use the memory pool only if the requested block has a reasonable size.
+ uint32_t slot;
+ if (_getSlotIndex(size, slot, allocatedSize)) {
+ // Slot reuse.
+ uint8_t* p = reinterpret_cast<uint8_t*>(_slots[slot]);
+ size = allocatedSize;
+
+ if (p) {
+ _slots[slot] = reinterpret_cast<Slot*>(p)->next;
+ return p;
+ }
+
+ _zone->align(kBlockAlignment);
+ p = _zone->ptr();
+ size_t remain = (size_t)(_zone->end() - p);
+
+ if (ASMJIT_LIKELY(remain >= size)) {
+ _zone->setPtr(p + size);
+ return p;
+ }
+ else {
+ // Distribute the remaining memory to suitable slots, if possible.
+ if (remain >= kLoGranularity) {
+ do {
+ size_t distSize = Support::min<size_t>(remain, kLoMaxSize);
+ uint32_t distSlot = uint32_t((distSize - kLoGranularity) / kLoGranularity);
+ ASMJIT_ASSERT(distSlot < kLoCount);
+
+ reinterpret_cast<Slot*>(p)->next = _slots[distSlot];
+ _slots[distSlot] = reinterpret_cast<Slot*>(p);
+
+ p += distSize;
+ remain -= distSize;
+ } while (remain >= kLoGranularity);
+ _zone->setPtr(p);
+ }
+
+ p = static_cast<uint8_t*>(_zone->_alloc(size, kBlockAlignment));
+ if (ASMJIT_UNLIKELY(!p)) {
+ allocatedSize = 0;
+ return nullptr;
+ }
+
+ return p;
+ }
+ }
+ else {
+ // Allocate a dynamic block.
+ size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
+
+ // Handle a possible overflow.
+ if (ASMJIT_UNLIKELY(kBlockOverhead >= std::numeric_limits<size_t>::max() - size))
+ return nullptr;
+
+ void* p = ::malloc(size + kBlockOverhead);
+ if (ASMJIT_UNLIKELY(!p)) {
+ allocatedSize = 0;
+ return nullptr;
+ }
+
+ // Link as first in `_dynamicBlocks` double-linked list.
+ DynamicBlock* block = static_cast<DynamicBlock*>(p);
+ DynamicBlock* next = _dynamicBlocks;
+
+ if (next)
+ next->prev = block;
+
+ block->prev = nullptr;
+ block->next = next;
+ _dynamicBlocks = block;
+
+ // Align the pointer to the guaranteed alignment and store `DynamicBlock`
+ // at the beginning of the memory block, so `_releaseDynamic()` can find it.
+ p = Support::alignUp(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kBlockAlignment);
+ reinterpret_cast<DynamicBlock**>(p)[-1] = block;
+
+ allocatedSize = size;
+ return p;
+ }
+}
+
+void* ZoneAllocator::_allocZeroed(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+
+ void* p = _alloc(size, allocatedSize);
+ if (ASMJIT_UNLIKELY(!p)) return p;
+ return memset(p, 0, allocatedSize);
+}
+
+void ZoneAllocator::_releaseDynamic(void* p, size_t size) noexcept {
+ DebugUtils::unused(size);
+ ASMJIT_ASSERT(isInitialized());
+
+ // Pointer to `DynamicBlock` is stored at [-1].
+ DynamicBlock* block = reinterpret_cast<DynamicBlock**>(p)[-1];
+ ASMJIT_ASSERT(ZoneAllocator_hasDynamicBlock(this, block));
+
+ // Unlink and free.
+ DynamicBlock* prev = block->prev;
+ DynamicBlock* next = block->next;
+
+ if (prev)
+ prev->next = next;
+ else
+ _dynamicBlocks = next;
+
+ if (next)
+ next->prev = prev;
+
+ ::free(block);
+}
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zone.h b/3rdparty/asmjit/src/asmjit/core/zone.h
new file mode 100644
index 00000000000..c426f785c0c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zone.h
@@ -0,0 +1,642 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONE_H_INCLUDED
+#define ASMJIT_CORE_ZONE_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::Zone]
+// ============================================================================
+
+//! Zone memory.
+//!
+//! Zone is an incremental memory allocator that allocates memory by simply
+//! incrementing a pointer. It allocates blocks of memory by using C's `malloc()`,
+//! but divides these blocks into smaller segments requested by calling
+//! `Zone::alloc()` and friends.
+//!
+//! Zone has no function to release the allocated memory. It has to be released
+//! all at once by calling `reset()`. If you need a more friendly allocator that
+//! also supports `release()`, consider using `Zone` with `ZoneAllocator`.
+class Zone {
+public:
+ ASMJIT_NONCOPYABLE(Zone)
+
+ //! \cond INTERNAL
+
+ //! A single block of memory managed by `Zone`.
+ struct Block {
+ inline uint8_t* data() const noexcept {
+ return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(this) + sizeof(*this));
+ }
+
+ //! Link to the previous block.
+ Block* prev;
+ //! Link to the next block.
+ Block* next;
+ //! Size of the block.
+ size_t size;
+ };
+
+ enum Limits : size_t {
+ kBlockSize = sizeof(Block),
+ kBlockOverhead = Globals::kAllocOverhead + kBlockSize,
+
+ kMinBlockSize = 64, // The number is ridiculously small, but still possible.
+ kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 4 - 1),
+ kMinAlignment = 1,
+ kMaxAlignment = 64
+ };
+
+ //! Pointer in the current block.
+ uint8_t* _ptr;
+ //! End of the current block.
+ uint8_t* _end;
+ //! Current block.
+ Block* _block;
+
+ union {
+ struct {
+ //! Default block size.
+ size_t _blockSize : Support::bitSizeOf<size_t>() - 4;
+ //! First block is temporary (ZoneTmp).
+ size_t _isTemporary : 1;
+ //! Block alignment (1 << alignment).
+ size_t _blockAlignmentShift : 3;
+ };
+ size_t _packedData;
+ };
+
+ static ASMJIT_API const Block _zeroBlock;
+
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new Zone.
+ //!
+ //! The `blockSize` parameter describes the default size of the block. If the
+ //! `size` parameter passed to `alloc()` is greater than the default size
+ //! `Zone` will allocate and use a larger block, but it will not change the
+ //! default `blockSize`.
+ //!
+ //! It's not required, but it's good practice to set `blockSize` to a
+ //! reasonable value that depends on the usage of `Zone`. Greater block sizes
+ //! are generally safer and perform better than unreasonably low block sizes.
+ ASMJIT_INLINE explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
+ _init(blockSize, blockAlignment, nullptr);
+ }
+
+ ASMJIT_INLINE Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
+ _init(blockSize, blockAlignment, &temporary);
+ }
+
+ //! Moves an existing `Zone`.
+ //!
+  //! \note You cannot move an existing `ZoneTmp` as it uses embedded storage.
+  //! Attempting to move a `ZoneTmp` results in an assertion failure in debug
+  //! mode and undefined behavior in release mode.
+ ASMJIT_INLINE Zone(Zone&& other) noexcept
+ : _ptr(other._ptr),
+ _end(other._end),
+ _block(other._block),
+ _packedData(other._packedData) {
+ ASMJIT_ASSERT(!other.isTemporary());
+ other._block = const_cast<Block*>(&_zeroBlock);
+ other._ptr = other._block->data();
+ other._end = other._block->data();
+ }
+
+ //! Destroys the `Zone` instance.
+ //!
+ //! This will destroy the `Zone` instance and release all blocks of memory
+ //! allocated by it. It performs implicit `reset(Globals::kResetHard)`.
+ ASMJIT_INLINE ~Zone() noexcept { reset(Globals::kResetHard); }
+
+ ASMJIT_API void _init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept;
+
+ //! Resets the `Zone` invalidating all blocks allocated.
+ //!
+ //! See `Globals::ResetPolicy` for more details.
+ ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
+ ASMJIT_INLINE bool isTemporary() const noexcept { return _isTemporary != 0; }
+
+ //! Returns the default block size.
+ ASMJIT_INLINE size_t blockSize() const noexcept { return _blockSize; }
+ //! Returns the default block alignment.
+ ASMJIT_INLINE size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
+ //! Returns remaining size of the current block.
+ ASMJIT_INLINE size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
+
+ //! Returns the current zone cursor (dangerous).
+ //!
+ //! This is a function that can be used to get exclusive access to the current
+ //! block's memory buffer.
+ template<typename T = uint8_t>
+ ASMJIT_INLINE T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
+
+ //! Returns the end of the current zone block, only useful if you use `ptr()`.
+ template<typename T = uint8_t>
+ ASMJIT_INLINE T* end() noexcept { return reinterpret_cast<T*>(_end); }
+
+ //! Sets the current zone pointer to `ptr` (must be within the current block).
+ template<typename T>
+ ASMJIT_INLINE void setPtr(T* ptr) noexcept {
+ uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
+ ASMJIT_ASSERT(p >= _ptr && p <= _end);
+ _ptr = p;
+ }
+
+ //! Sets the end zone pointer to `end` (must be within the current block).
+ template<typename T>
+ ASMJIT_INLINE void setEnd(T* end) noexcept {
+ uint8_t* p = reinterpret_cast<uint8_t*>(end);
+ ASMJIT_ASSERT(p >= _ptr && p <= _end);
+ _end = p;
+ }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ ASMJIT_INLINE void swap(Zone& other) noexcept {
+    // Swapping a temporary Zone would leave pointers into its embedded
+    // storage dangling - hence the asserts below.
+ ASMJIT_ASSERT(!this->isTemporary());
+ ASMJIT_ASSERT(!other.isTemporary());
+
+ std::swap(_ptr, other._ptr);
+ std::swap(_end, other._end);
+ std::swap(_block, other._block);
+ std::swap(_packedData, other._packedData);
+ }
+
+ //! Aligns the current pointer to `alignment`.
+ ASMJIT_INLINE void align(size_t alignment) noexcept {
+ _ptr = Support::min(Support::alignUp(_ptr, alignment), _end);
+ }
+
+ //! Ensures the remaining size is at least equal or greater than `size`.
+ //!
+ //! \note This function doesn't respect any alignment. If you need to ensure
+ //! there is enough room for an aligned allocation you need to call `align()`
+ //! before calling `ensure()`.
+ ASMJIT_INLINE Error ensure(size_t size) noexcept {
+ if (size <= remainingSize())
+ return kErrorOk;
+ else
+ return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
+ }
+
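+  // A usage sketch (illustration only, not part of the original header):
+  // reserve room for an aligned bulk write, then commit only what was used;
+  // `bytesUsed` is hypothetical.
+  //
+  //   zone.align(8);
+  //   if (zone.ensure(64) == kErrorOk) {
+  //     uint8_t* p = zone.ptr();
+  //     // ... write up to 64 bytes at `p` ...
+  //     zone.setPtr(p + bytesUsed);
+  //   }
+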
+ ASMJIT_INLINE void _assignBlock(Block* block) noexcept {
+ size_t alignment = blockAlignment();
+ _ptr = Support::alignUp(block->data(), alignment);
+ _end = Support::alignDown(block->data() + block->size, alignment);
+ _block = block;
+ }
+
+ ASMJIT_INLINE void _assignZeroBlock() noexcept {
+ Block* block = const_cast<Block*>(&_zeroBlock);
+ _ptr = block->data();
+ _end = block->data();
+ _block = block;
+ }
+
+ //! \}
+
+ //! \name Allocation
+ //! \{
+
+ //! Allocates the requested memory specified by `size`.
+ //!
+  //! The returned pointer is valid until the `Zone` instance is destroyed or
+  //! reset by calling `reset()`. If you plan to construct a C++ object at the
+  //! returned address, use placement `new` and an explicit destructor call:
+ //!
+ //! ```
+ //! using namespace asmjit;
+ //!
+ //! class Object { ... };
+ //!
+ //! // Create Zone with default block size of approximately 65536 bytes.
+ //! Zone zone(65536 - Zone::kBlockOverhead);
+ //!
+  //! // Create your objects using the zone's allocation functions, for example:
+ //! Object* obj = static_cast<Object*>( zone.alloc(sizeof(Object)) );
+ //!
+ //! if (!obj) {
+ //! // Handle out of memory error.
+ //! }
+ //!
+ //! // Placement `new` and `delete` operators can be used to instantiate it.
+ //! new(obj) Object();
+ //!
+ //! // ... lifetime of your objects ...
+ //!
+ //! // To destroy the instance (if required).
+ //! obj->~Object();
+ //!
+ //! // Reset or destroy `Zone`.
+ //! zone.reset();
+ //! ```
+ ASMJIT_INLINE void* alloc(size_t size) noexcept {
+ if (ASMJIT_UNLIKELY(size > remainingSize()))
+ return _alloc(size, 1);
+
+ uint8_t* ptr = _ptr;
+ _ptr += size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` and `alignment`.
+ ASMJIT_INLINE void* alloc(size_t size, size_t alignment) noexcept {
+ ASMJIT_ASSERT(Support::isPowerOf2(alignment));
+ uint8_t* ptr = Support::alignUp(_ptr, alignment);
+
+ if (ptr >= _end || size > (size_t)(_end - ptr))
+ return _alloc(size, alignment);
+
+ _ptr = ptr + size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` without doing any checks.
+ //!
+  //! Can only be called when `remainingSize()` returns at least `size`.
+ ASMJIT_INLINE void* allocNoCheck(size_t size) noexcept {
+ ASMJIT_ASSERT(remainingSize() >= size);
+
+ uint8_t* ptr = _ptr;
+ _ptr += size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
+ //!
+ //! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
+ ASMJIT_INLINE void* allocNoCheck(size_t size, size_t alignment) noexcept {
+ ASMJIT_ASSERT(Support::isPowerOf2(alignment));
+
+ uint8_t* ptr = Support::alignUp(_ptr, alignment);
+ ASMJIT_ASSERT(size <= (size_t)(_end - ptr));
+
+ _ptr = ptr + size;
+ return static_cast<void*>(ptr);
+ }
+
+ //! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
+ ASMJIT_API void* allocZeroed(size_t size, size_t alignment = 1) noexcept;
+
+  //! Like `alloc()`, but the returned pointer is cast to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(alloc(size, alignment));
+ }
+
+  //! Like `allocNoCheck()`, but the returned pointer is cast to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(allocNoCheck(size, alignment));
+ }
+
+  //! Like `allocZeroed()`, but the returned pointer is cast to `T*`.
+ template<typename T>
+ ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
+ return static_cast<T*>(allocZeroed(size, alignment));
+ }
+
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T>
+ ASMJIT_INLINE T* newT() noexcept {
+ void* p = alloc(sizeof(T), alignof(T));
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T();
+ }
+
+ //! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
+ template<typename T, typename... Args>
+ ASMJIT_INLINE T* newT(Args&&... args) noexcept {
+ void* p = alloc(sizeof(T), alignof(T));
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T(std::forward<Args>(args)...);
+ }
+
+ //! \cond INTERNAL
+ //!
+ //! Internal alloc function used by other inlines.
+ ASMJIT_API void* _alloc(size_t size, size_t alignment) noexcept;
+ //! \endcond
+
+ //! Helper to duplicate data.
+ ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
+
+ //! Helper to duplicate data.
+ ASMJIT_INLINE void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
+ align(alignment);
+ return dup(data, size, nullTerminate);
+ }
+
+  //! Helper to duplicate a formatted string; the output is truncated to the
+  //! size of an internal 512-byte buffer.
+  ASMJIT_API char* sformat(const char* fmt, ...) noexcept;
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneTmp]
+// ============================================================================
+
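+//! A `Zone` with `N` bytes of embedded storage that is consumed before any
+//! dynamic block is allocated.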
+template<size_t N>
+class ZoneTmp : public Zone {
+public:
+ ASMJIT_NONCOPYABLE(ZoneTmp<N>)
+
+ struct Storage {
+ char data[N];
+ } _storage;
+
+ ASMJIT_INLINE explicit ZoneTmp(size_t blockSize, size_t blockAlignment = 1) noexcept
+ : Zone(blockSize, blockAlignment, Support::Temporary(_storage.data, N)) {}
+};
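+
+// A usage sketch (illustration only, not part of the original header): a
+// temporary zone that serves allocations from 512 bytes of embedded storage
+// and only falls back to `malloc()` once that storage is exhausted:
+//
+//   ZoneTmp<512> tmp(256);
+//   void* p = tmp.alloc(64);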
+
+// ============================================================================
+// [asmjit::ZoneAllocator]
+// ============================================================================
+
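+// A usage sketch (illustration only, not part of the original header),
+// assuming the public `alloc()` / `release()` pair referenced in the docs
+// below:
+//
+//   Zone zone(4096 - Zone::kBlockOverhead);
+//   ZoneAllocator allocator(&zone);
+//
+//   void* p = allocator.alloc(40);  // Served from the 64-byte pool slot.
+//   allocator.release(p, 40);       // Returned to the slot for reuse.
+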
+//! Zone-based memory allocator that uses an existing `Zone` and provides
+//! `release()` functionality on top of it. It uses `Zone` only for chunks
+//! that can be pooled, and uses libc `malloc()` for chunks that are large.
+//!
+//! The advantage of ZoneAllocator is that it can allocate small chunks of
+//! memory really fast, and these chunks, when released, will be reused by
+//! consecutive calls to `alloc()`. Also, since ZoneAllocator uses `Zone`,
+//! you can turn any `Zone` into a `ZoneAllocator` and use it in your `Pass`
+//! when necessary.
+//!
+//! ZoneAllocator is used by AsmJit containers to make containers with only a
+//! few elements fast (and lightweight) and to allow them to grow and use
+//! dynamic blocks when they require more storage.
+class ZoneAllocator {
+public:
+ ASMJIT_NONCOPYABLE(ZoneAllocator)
+
+ //! \cond INTERNAL
+ enum {
+ // In short, we pool chunks of these sizes:
+ // [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
+
+    //! Granularity of the low-granularity pools, in bytes (must be at least 16).
+ kLoGranularity = 32,
+ //! Number of slots of a low granularity pool.
+ kLoCount = 4,
+ //! Maximum size of a block that can be allocated in a low granularity pool.
+ kLoMaxSize = kLoGranularity * kLoCount,
+
+    //! Granularity of the high-granularity pools, in bytes.
+ kHiGranularity = 64,
+ //! Number of slots of a high granularity pool.
+ kHiCount = 6,
+ //! Maximum size of a block that can be allocated in a high granularity pool.
+ kHiMaxSize = kLoMaxSize + kHiGranularity * kHiCount,
+
+ //! Alignment of every pointer returned by `alloc()`.
+ kBlockAlignment = kLoGranularity
+ };
+
+  //! Singly-linked list used to store unused chunks.
+  struct Slot {
+    //! Link to the next slot in the singly-linked list.
+ Slot* next;
+ };
+
+  //! A block of memory that has been allocated dynamically and is not part
+  //! of the block list used by the allocator. Such blocks are tracked so
+  //! they can be freed by `reset()` if not freed explicitly.
+ struct DynamicBlock {
+ DynamicBlock* prev;
+ DynamicBlock* next;
+ };
+
+ //! \endcond
+
+ //! Zone used to allocate memory that fits into slots.
+ Zone* _zone;
+ //! Indexed slots containing released memory.
+ Slot* _slots[kLoCount + kHiCount];
+ //! Dynamic blocks for larger allocations (no slots).
+ DynamicBlock* _dynamicBlocks;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new `ZoneAllocator`.
+ //!
+ //! \note To use it, you must first `init()` it.
+ inline ZoneAllocator() noexcept {
+ memset(this, 0, sizeof(*this));
+ }
+
+ //! Creates a new `ZoneAllocator` initialized to use `zone`.
+ inline explicit ZoneAllocator(Zone* zone) noexcept {
+ memset(this, 0, sizeof(*this));
+ _zone = zone;
+ }
+
+ //! Destroys the `ZoneAllocator`.
+ inline ~ZoneAllocator() noexcept { reset(); }
+
+ //! Tests whether the `ZoneAllocator` is initialized (i.e. has `Zone`).
+ inline bool isInitialized() const noexcept { return _zone != nullptr; }
+
+ //! Convenience function to initialize the `ZoneAllocator` with `zone`.
+ //!
+ //! It's the same as calling `reset(zone)`.
+ inline void init(Zone* zone) noexcept { reset(zone); }
+
+  //! Resets this `ZoneAllocator`, forgetting the currently attached `Zone`
+  //! (if any). Optionally attaches the passed `zone`, or keeps the
+  //! `ZoneAllocator` in an uninitialized state if `zone` is null.
+ ASMJIT_API void reset(Zone* zone = nullptr) noexcept;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator`
+ //! is not initialized.
+ inline Zone* zone() const noexcept { return _zone; }
+
+ //! \}
+
+  //! \cond INTERNAL
+ //! \name Internals
+ //! \{
+
+  //! Writes the slot index to be used for `size` to `slot`. Returns `true`
+  //! if a valid slot index has been written, and `false` if `size` is too
+  //! large to be pooled (greater than `kHiMaxSize`).
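+  //!
+  //! For example, with the default constants, sizes 1..32 map to slot 0,
+  //! 33..64 to slot 1, 65..96 to slot 2, 97..128 to slot 3, and 129..192 to
+  //! slot 4 (the first high-granularity slot); sizes above `kHiMaxSize` (512)
+  //! are not pooled at all.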
+ static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
+ ASMJIT_ASSERT(size > 0);
+ if (size > kHiMaxSize)
+ return false;
+
+ if (size <= kLoMaxSize)
+ slot = uint32_t((size - 1) / kLoGranularity);
+ else
+ slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
+
+ return true;
+ }
+
+  //! \overload
+  //!
+  //! Also writes the slot's exact size to `allocatedSize`, which is equal
+  //! to or slightly greater than `size`.
+ static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(size > 0);
+ if (size > kHiMaxSize)
+ return false;
+
+ if (size <= kLoMaxSize) {
+ slot = uint32_t((size - 1) / kLoGranularity);
+ allocatedSize = Support::alignUp(size, kLoGranularity);
+ }
+ else {
+ slot = uint32_t((size - kLoMaxSize - 1) / kHiGranularity) + kLoCount;
+ allocatedSize = Support::alignUp(size, kHiGranularity);
+ }
+
+ return true;
+ }
+
+ //! \}
+ //! \endcond
+
+ //! \name Allocation
+ //! \{
+
+ //! \cond INTERNAL
+ ASMJIT_API void* _alloc(size_t size, size_t& allocatedSize) noexcept;
+ ASMJIT_API void* _allocZeroed(size_t size, size_t& allocatedSize) noexcept;
+ ASMJIT_API void _releaseDynamic(void* p, size_t size) noexcept;
+ //! \endcond
+
+ //! Allocates `size` bytes of memory, ideally from an available pool.
+ //!
+  //! \note `size` can't be zero; passing zero asserts in debug builds.
+ inline void* alloc(size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ size_t allocatedSize;
+ return _alloc(size, allocatedSize);
+ }
+
+  //! Like `alloc(size)`, but also reports via `allocatedSize` how big the
+  //! returned block actually is, which is useful for containers that want
+  //! to avoid growing too early.
+ inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ return _alloc(size, allocatedSize);
+ }
+
+  //! Like `alloc()`, but the returned pointer is cast to `T*`.
+ template<typename T>
+ inline T* allocT(size_t size = sizeof(T)) noexcept {
+ return static_cast<T*>(alloc(size));
+ }
+
+ //! Like `alloc(size)`, but returns zeroed memory.
+ inline void* allocZeroed(size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ size_t allocatedSize;
+ return _allocZeroed(size, allocatedSize);
+ }
+
+ //! Like `alloc(size, allocatedSize)`, but returns zeroed memory.
+ inline void* allocZeroed(size_t size, size_t& allocatedSize) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ return _allocZeroed(size, allocatedSize);
+ }
+
+  //! Like `allocZeroed()`, but the returned pointer is cast to `T*`.
+ template<typename T>
+ inline T* allocZeroedT(size_t size = sizeof(T)) noexcept {
+ return static_cast<T*>(allocZeroed(size));
+ }
+
+  //! Like `new(std::nothrow) T(...)`, but allocated by `ZoneAllocator`.
+ template<typename T>
+ inline T* newT() noexcept {
+ void* p = allocT<T>();
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T();
+  }
+
+  //! Like `new(std::nothrow) T(...)`, but allocated by `ZoneAllocator`.
+ template<typename T, typename... Args>
+ inline T* newT(Args&&... args) noexcept {
+ void* p = allocT<T>();
+ if (ASMJIT_UNLIKELY(!p))
+ return nullptr;
+ return new(p) T(std::forward<Args>(args)...);
+ }
+
+  //! Releases the memory previously allocated by `alloc()`. The `size`
+  //! argument must match either the `size` originally passed to `alloc()`
+  //! or the `allocatedSize` that `alloc()` reported.
+ inline void release(void* p, size_t size) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ ASMJIT_ASSERT(p != nullptr);
+ ASMJIT_ASSERT(size != 0);
+
+ uint32_t slot;
+ if (_getSlotIndex(size, slot)) {
+ static_cast<Slot*>(p)->next = static_cast<Slot*>(_slots[slot]);
+ _slots[slot] = static_cast<Slot*>(p);
+ }
+ else {
+ _releaseDynamic(p, size);
+ }
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONE_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonehash.cpp b/3rdparty/asmjit/src/asmjit/core/zonehash.cpp
new file mode 100644
index 00000000000..fb48d85c6d2
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonehash.cpp
@@ -0,0 +1,331 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonehash.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Helpers]
+// ============================================================================
+
+#define ASMJIT_POPULATE_PRIMES(ENTRY) \
+ ENTRY(2 , 0x80000000, 32), /* [N * 0x80000000 >> 32] (rcp=2147483648) */ \
+ ENTRY(11 , 0xBA2E8BA3, 35), /* [N * 0xBA2E8BA3 >> 35] (rcp=3123612579) */ \
+ ENTRY(29 , 0x8D3DCB09, 36), /* [N * 0x8D3DCB09 >> 36] (rcp=2369637129) */ \
+ ENTRY(41 , 0xC7CE0C7D, 37), /* [N * 0xC7CE0C7D >> 37] (rcp=3352169597) */ \
+ ENTRY(59 , 0x8AD8F2FC, 37), /* [N * 0x8AD8F2FC >> 37] (rcp=2329473788) */ \
+ ENTRY(83 , 0xC565C87C, 38), /* [N * 0xC565C87C >> 38] (rcp=3311782012) */ \
+ ENTRY(131 , 0xFA232CF3, 39), /* [N * 0xFA232CF3 >> 39] (rcp=4196609267) */ \
+ ENTRY(191 , 0xAB8F69E3, 39), /* [N * 0xAB8F69E3 >> 39] (rcp=2878302691) */ \
+ ENTRY(269 , 0xF3A0D52D, 40), /* [N * 0xF3A0D52D >> 40] (rcp=4087403821) */ \
+ ENTRY(383 , 0xAB1CBDD4, 40), /* [N * 0xAB1CBDD4 >> 40] (rcp=2870787540) */ \
+ ENTRY(541 , 0xF246FACC, 41), /* [N * 0xF246FACC >> 41] (rcp=4064737996) */ \
+ ENTRY(757 , 0xAD2589A4, 41), /* [N * 0xAD2589A4 >> 41] (rcp=2904918436) */ \
+ ENTRY(1061 , 0xF7129426, 42), /* [N * 0xF7129426 >> 42] (rcp=4145189926) */ \
+ ENTRY(1499 , 0xAEE116B7, 42), /* [N * 0xAEE116B7 >> 42] (rcp=2933986999) */ \
+ ENTRY(2099 , 0xF9C7A737, 43), /* [N * 0xF9C7A737 >> 43] (rcp=4190611255) */ \
+ ENTRY(2939 , 0xB263D25C, 43), /* [N * 0xB263D25C >> 43] (rcp=2992886364) */ \
+ ENTRY(4111 , 0xFF10E02E, 44), /* [N * 0xFF10E02E >> 44] (rcp=4279296046) */ \
+ ENTRY(5779 , 0xB5722823, 44), /* [N * 0xB5722823 >> 44] (rcp=3044157475) */ \
+ ENTRY(8087 , 0x81A97405, 44), /* [N * 0x81A97405 >> 44] (rcp=2175366149) */ \
+ ENTRY(11321 , 0xB93E91DB, 45), /* [N * 0xB93E91DB >> 45] (rcp=3107885531) */ \
+ ENTRY(15859 , 0x843CC26B, 45), /* [N * 0x843CC26B >> 45] (rcp=2218574443) */ \
+ ENTRY(22189 , 0xBD06B9EA, 46), /* [N * 0xBD06B9EA >> 46] (rcp=3171334634) */ \
+ ENTRY(31051 , 0x8713F186, 46), /* [N * 0x8713F186 >> 46] (rcp=2266231174) */ \
+ ENTRY(43451 , 0xC10F1CB9, 47), /* [N * 0xC10F1CB9 >> 47] (rcp=3238993081) */ \
+ ENTRY(60869 , 0x89D06A86, 47), /* [N * 0x89D06A86 >> 47] (rcp=2312137350) */ \
+ ENTRY(85159 , 0xC502AF3B, 48), /* [N * 0xC502AF3B >> 48] (rcp=3305287483) */ \
+ ENTRY(102107 , 0xA44F65AE, 48), /* [N * 0xA44F65AE >> 48] (rcp=2756666798) */ \
+ ENTRY(122449 , 0x89038F77, 48), /* [N * 0x89038F77 >> 48] (rcp=2298711927) */ \
+ ENTRY(146819 , 0xE48AF7E9, 49), /* [N * 0xE48AF7E9 >> 49] (rcp=3834312681) */ \
+ ENTRY(176041 , 0xBE9B145B, 49), /* [N * 0xBE9B145B >> 49] (rcp=3197834331) */ \
+ ENTRY(211073 , 0x9EF882BA, 49), /* [N * 0x9EF882BA >> 49] (rcp=2667086522) */ \
+ ENTRY(253081 , 0x849571AB, 49), /* [N * 0x849571AB >> 49] (rcp=2224386475) */ \
+ ENTRY(303469 , 0xDD239C97, 50), /* [N * 0xDD239C97 >> 50] (rcp=3710098583) */ \
+ ENTRY(363887 , 0xB86C196D, 50), /* [N * 0xB86C196D >> 50] (rcp=3094092141) */ \
+ ENTRY(436307 , 0x99CFA4E9, 50), /* [N * 0x99CFA4E9 >> 50] (rcp=2580522217) */ \
+ ENTRY(523177 , 0x804595C0, 50), /* [N * 0x804595C0 >> 50] (rcp=2152043968) */ \
+ ENTRY(627293 , 0xD5F69FCF, 51), /* [N * 0xD5F69FCF >> 51] (rcp=3589709775) */ \
+ ENTRY(752177 , 0xB27063BA, 51), /* [N * 0xB27063BA >> 51] (rcp=2993710010) */ \
+ ENTRY(901891 , 0x94D170AC, 51), /* [N * 0x94D170AC >> 51] (rcp=2496753836) */ \
+ ENTRY(1081369 , 0xF83C9767, 52), /* [N * 0xF83C9767 >> 52] (rcp=4164720487) */ \
+ ENTRY(1296563 , 0xCF09435D, 52), /* [N * 0xCF09435D >> 52] (rcp=3473490781) */ \
+ ENTRY(1554583 , 0xACAC7198, 52), /* [N * 0xACAC7198 >> 52] (rcp=2896982424) */ \
+ ENTRY(1863971 , 0x90033EE3, 52), /* [N * 0x90033EE3 >> 52] (rcp=2416131811) */ \
+ ENTRY(2234923 , 0xF0380EBD, 53), /* [N * 0xF0380EBD >> 53] (rcp=4030205629) */ \
+ ENTRY(2679673 , 0xC859731E, 53), /* [N * 0xC859731E >> 53] (rcp=3361305374) */ \
+ ENTRY(3212927 , 0xA718DE27, 53), /* [N * 0xA718DE27 >> 53] (rcp=2803424807) */ \
+ ENTRY(3852301 , 0x8B5D1B4B, 53), /* [N * 0x8B5D1B4B >> 53] (rcp=2338134859) */ \
+ ENTRY(4618921 , 0xE8774804, 54), /* [N * 0xE8774804 >> 54] (rcp=3900131332) */ \
+ ENTRY(5076199 , 0xD386574E, 54), /* [N * 0xD386574E >> 54] (rcp=3548796750) */ \
+ ENTRY(5578757 , 0xC0783FE1, 54), /* [N * 0xC0783FE1 >> 54] (rcp=3229106145) */ \
+ ENTRY(6131057 , 0xAF21B08F, 54), /* [N * 0xAF21B08F >> 54] (rcp=2938220687) */ \
+ ENTRY(6738031 , 0x9F5AFD6E, 54), /* [N * 0x9F5AFD6E >> 54] (rcp=2673540462) */ \
+ ENTRY(7405163 , 0x90FFC3B9, 54), /* [N * 0x90FFC3B9 >> 54] (rcp=2432680889) */ \
+ ENTRY(8138279 , 0x83EFECFC, 54), /* [N * 0x83EFECFC >> 54] (rcp=2213539068) */ \
+ ENTRY(8943971 , 0xF01AA2EF, 55), /* [N * 0xF01AA2EF >> 55] (rcp=4028277487) */ \
+ ENTRY(9829447 , 0xDA7979B2, 55), /* [N * 0xDA7979B2 >> 55] (rcp=3665394098) */ \
+ ENTRY(10802581 , 0xC6CB2771, 55), /* [N * 0xC6CB2771 >> 55] (rcp=3335202673) */ \
+ ENTRY(11872037 , 0xB4E2C7DD, 55), /* [N * 0xB4E2C7DD >> 55] (rcp=3034761181) */ \
+ ENTRY(13047407 , 0xA4974124, 55), /* [N * 0xA4974124 >> 55] (rcp=2761376036) */ \
+ ENTRY(14339107 , 0x95C39CF1, 55), /* [N * 0x95C39CF1 >> 55] (rcp=2512624881) */ \
+ ENTRY(15758737 , 0x8845C763, 55), /* [N * 0x8845C763 >> 55] (rcp=2286274403) */ \
+ ENTRY(17318867 , 0xF7FE593F, 56), /* [N * 0xF7FE593F >> 56] (rcp=4160641343) */ \
+ ENTRY(19033439 , 0xE1A75D93, 56), /* [N * 0xE1A75D93 >> 56] (rcp=3785842067) */ \
+ ENTRY(20917763 , 0xCD5389B3, 56), /* [N * 0xCD5389B3 >> 56] (rcp=3444804019) */ \
+ ENTRY(22988621 , 0xBAD4841A, 56), /* [N * 0xBAD4841A >> 56] (rcp=3134489626) */ \
+ ENTRY(25264543 , 0xA9FFF2FF, 56), /* [N * 0xA9FFF2FF >> 56] (rcp=2852123391) */ \
+ ENTRY(27765763 , 0x9AAF8BF3, 56), /* [N * 0x9AAF8BF3 >> 56] (rcp=2595195891) */ \
+ ENTRY(30514607 , 0x8CC04E18, 56), /* [N * 0x8CC04E18 >> 56] (rcp=2361413144) */ \
+ ENTRY(33535561 , 0x80127068, 56), /* [N * 0x80127068 >> 56] (rcp=2148692072) */ \
+ ENTRY(36855587 , 0xE911F0BB, 57), /* [N * 0xE911F0BB >> 57] (rcp=3910267067) */ \
+ ENTRY(38661533 , 0xDE2ED7BE, 57), /* [N * 0xDE2ED7BE >> 57] (rcp=3727611838) */ \
+ ENTRY(40555961 , 0xD3CDF2FD, 57), /* [N * 0xD3CDF2FD >> 57] (rcp=3553489661) */ \
+ ENTRY(42543269 , 0xC9E9196C, 57), /* [N * 0xC9E9196C >> 57] (rcp=3387496812) */ \
+ ENTRY(44627909 , 0xC07A9EB6, 57), /* [N * 0xC07A9EB6 >> 57] (rcp=3229261494) */ \
+ ENTRY(46814687 , 0xB77CEF65, 57), /* [N * 0xB77CEF65 >> 57] (rcp=3078418277) */ \
+ ENTRY(49108607 , 0xAEEAC65C, 57), /* [N * 0xAEEAC65C >> 57] (rcp=2934621788) */ \
+ ENTRY(51514987 , 0xA6BF0EF0, 57), /* [N * 0xA6BF0EF0 >> 57] (rcp=2797539056) */ \
+ ENTRY(54039263 , 0x9EF510B5, 57), /* [N * 0x9EF510B5 >> 57] (rcp=2666860725) */ \
+ ENTRY(56687207 , 0x97883B42, 57), /* [N * 0x97883B42 >> 57] (rcp=2542287682) */ \
+ ENTRY(59464897 , 0x907430ED, 57), /* [N * 0x907430ED >> 57] (rcp=2423533805) */ \
+ ENTRY(62378699 , 0x89B4CA91, 57), /* [N * 0x89B4CA91 >> 57] (rcp=2310326929) */ \
+ ENTRY(65435273 , 0x83461568, 57), /* [N * 0x83461568 >> 57] (rcp=2202408296) */ \
+ ENTRY(68641607 , 0xFA489AA8, 58), /* [N * 0xFA489AA8 >> 58] (rcp=4199062184) */ \
+ ENTRY(72005051 , 0xEE97B1C5, 58), /* [N * 0xEE97B1C5 >> 58] (rcp=4002918853) */ \
+ ENTRY(75533323 , 0xE3729293, 58), /* [N * 0xE3729293 >> 58] (rcp=3815936659) */ \
+ ENTRY(79234469 , 0xD8D2BBA3, 58), /* [N * 0xD8D2BBA3 >> 58] (rcp=3637689251) */ \
+ ENTRY(83116967 , 0xCEB1F196, 58), /* [N * 0xCEB1F196 >> 58] (rcp=3467768214) */ \
+ ENTRY(87189709 , 0xC50A4426, 58), /* [N * 0xC50A4426 >> 58] (rcp=3305784358) */ \
+ ENTRY(91462061 , 0xBBD6052B, 58), /* [N * 0xBBD6052B >> 58] (rcp=3151365419) */ \
+ ENTRY(95943737 , 0xB30FD999, 58), /* [N * 0xB30FD999 >> 58] (rcp=3004160409) */ \
+ ENTRY(100644991 , 0xAAB29CED, 58), /* [N * 0xAAB29CED >> 58] (rcp=2863832301) */ \
+ ENTRY(105576619 , 0xA2B96421, 58), /* [N * 0xA2B96421 >> 58] (rcp=2730058785) */ \
+ ENTRY(110749901 , 0x9B1F8434, 58), /* [N * 0x9B1F8434 >> 58] (rcp=2602533940) */ \
+ ENTRY(116176651 , 0x93E08B4A, 58), /* [N * 0x93E08B4A >> 58] (rcp=2480966474) */ \
+ ENTRY(121869317 , 0x8CF837E0, 58), /* [N * 0x8CF837E0 >> 58] (rcp=2365077472) */ \
+ ENTRY(127840913 , 0x86627F01, 58), /* [N * 0x86627F01 >> 58] (rcp=2254601985) */ \
+ ENTRY(134105159 , 0x801B8178, 58), /* [N * 0x801B8178 >> 58] (rcp=2149286264) */ \
+ ENTRY(140676353 , 0xF43F294F, 59), /* [N * 0xF43F294F >> 59] (rcp=4097780047) */ \
+ ENTRY(147569509 , 0xE8D67089, 59), /* [N * 0xE8D67089 >> 59] (rcp=3906367625) */ \
+ ENTRY(154800449 , 0xDDF6243C, 59), /* [N * 0xDDF6243C >> 59] (rcp=3723895868) */ \
+ ENTRY(162385709 , 0xD397E6AE, 59), /* [N * 0xD397E6AE >> 59] (rcp=3549947566) */ \
+ ENTRY(170342629 , 0xC9B5A65A, 59), /* [N * 0xC9B5A65A >> 59] (rcp=3384125018) */ \
+ ENTRY(178689419 , 0xC0499865, 59), /* [N * 0xC0499865 >> 59] (rcp=3226048613) */ \
+ ENTRY(187445201 , 0xB74E35FA, 59), /* [N * 0xB74E35FA >> 59] (rcp=3075356154) */ \
+ ENTRY(196630033 , 0xAEBE3AC1, 59), /* [N * 0xAEBE3AC1 >> 59] (rcp=2931702465) */ \
+ ENTRY(206264921 , 0xA694A37F, 59), /* [N * 0xA694A37F >> 59] (rcp=2794759039) */ \
+ ENTRY(216371963 , 0x9ECCA59F, 59), /* [N * 0x9ECCA59F >> 59] (rcp=2664211871) */ \
+ ENTRY(226974197 , 0x9761B6AE, 59), /* [N * 0x9761B6AE >> 59] (rcp=2539763374) */ \
+ ENTRY(238095983 , 0x904F79A1, 59), /* [N * 0x904F79A1 >> 59] (rcp=2421127585) */ \
+ ENTRY(249762697 , 0x8991CD1F, 59), /* [N * 0x8991CD1F >> 59] (rcp=2308033823) */ \
+ ENTRY(262001071 , 0x8324BCA5, 59), /* [N * 0x8324BCA5 >> 59] (rcp=2200222885) */ \
+ ENTRY(274839137 , 0xFA090732, 60), /* [N * 0xFA090732 >> 60] (rcp=4194895666) */ \
+ ENTRY(288306269 , 0xEE5B16ED, 60), /* [N * 0xEE5B16ED >> 60] (rcp=3998947053) */ \
+ ENTRY(302433337 , 0xE338CE49, 60), /* [N * 0xE338CE49 >> 60] (rcp=3812150857) */ \
+ ENTRY(317252587 , 0xD89BABC0, 60), /* [N * 0xD89BABC0 >> 60] (rcp=3634080704) */ \
+ ENTRY(374358107 , 0xB790EF43, 60), /* [N * 0xB790EF43 >> 60] (rcp=3079728963) */ \
+ ENTRY(441742621 , 0x9B908414, 60), /* [N * 0x9B908414 >> 60] (rcp=2609939476) */ \
+ ENTRY(521256293 , 0x83D596FA, 60), /* [N * 0x83D596FA >> 60] (rcp=2211813114) */ \
+ ENTRY(615082441 , 0xDF72B16E, 61), /* [N * 0xDF72B16E >> 61] (rcp=3748835694) */ \
+ ENTRY(725797313 , 0xBD5CDB3B, 61), /* [N * 0xBD5CDB3B >> 61] (rcp=3176979259) */ \
+ ENTRY(856440829 , 0xA07A14E9, 61), /* [N * 0xA07A14E9 >> 61] (rcp=2692355305) */ \
+ ENTRY(1010600209, 0x87FF5289, 61), /* [N * 0x87FF5289 >> 61] (rcp=2281656969) */ \
+ ENTRY(1192508257, 0xE6810540, 62), /* [N * 0xE6810540 >> 62] (rcp=3867215168) */ \
+ ENTRY(1407159797, 0xC357A480, 62), /* [N * 0xC357A480 >> 62] (rcp=3277300864) */ \
+ ENTRY(1660448617, 0xA58B5B4F, 62), /* [N * 0xA58B5B4F >> 62] (rcp=2777373519) */ \
+ ENTRY(1959329399, 0x8C4AB55F, 62), /* [N * 0x8C4AB55F >> 62] (rcp=2353706335) */ \
+ ENTRY(2312008693, 0xEDC86320, 63), /* [N * 0xEDC86320 >> 63] (rcp=3989332768) */ \
+ ENTRY(2728170257, 0xC982C4D2, 63), /* [N * 0xC982C4D2 >> 63] (rcp=3380790482) */ \
+ ENTRY(3219240923, 0xAAC599B6, 63) /* [N * 0xAAC599B6 >> 63] (rcp=2865076662) */
+
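+// Each ENTRY pairs a prime bucket count N with a precomputed reciprocal RCP
+// and shift S chosen so that `(x * RCP) >> S` computes `x / N` for the hash
+// values used here, which lets `_calcMod()` replace the modulo operation by
+// a multiply and a shift.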
+struct HashPrime {
+ //! Prime number
+ uint32_t prime;
+ //! Reciprocal to turn division into multiplication.
+ uint32_t rcp;
+};
+
+static const HashPrime ZoneHash_primeArray[] = {
+ #define E(PRIME, RCP, SHIFT) { PRIME, RCP }
+ ASMJIT_POPULATE_PRIMES(E)
+ #undef E
+};
+
+static const uint8_t ZoneHash_primeShift[] = {
+ #define E(PRIME, RCP, SHIFT) uint8_t(SHIFT)
+ ASMJIT_POPULATE_PRIMES(E)
+ #undef E
+};
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Rehash]
+// ============================================================================
+
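+// Grows the bucket array to the prime at `primeIndex` and redistributes all
+// existing nodes into the new buckets. Rehashing reuses each node's cached
+// `_hashCode`, so keys are never hashed again.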
+void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept {
+ ASMJIT_ASSERT(primeIndex < ASMJIT_ARRAY_SIZE(ZoneHash_primeArray));
+ uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
+
+ ZoneHashNode** oldData = _data;
+ ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(
+ allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
+
+  // If the allocation failed we keep using the old table; it still works,
+  // but lookup performance will degrade as it fills up.
+ if (ASMJIT_UNLIKELY(newData == nullptr))
+ return;
+
+ uint32_t i;
+ uint32_t oldCount = _bucketsCount;
+
+ _data = newData;
+ _bucketsCount = newCount;
+ _bucketsGrow = uint32_t(newCount * 0.9);
+ _rcpValue = ZoneHash_primeArray[primeIndex].rcp;
+ _rcpShift = ZoneHash_primeShift[primeIndex];
+ _primeIndex = uint8_t(primeIndex);
+
+ for (i = 0; i < oldCount; i++) {
+ ZoneHashNode* node = oldData[i];
+ while (node) {
+ ZoneHashNode* next = node->_hashNext;
+ uint32_t hashMod = _calcMod(node->_hashCode);
+
+ node->_hashNext = newData[hashMod];
+ newData[hashMod] = node;
+ node = next;
+ }
+ }
+
+ if (oldData != _embedded)
+ allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
+}
+
+// ============================================================================
+// [asmjit::ZoneHashBase - Ops]
+// ============================================================================
+
+ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
+ uint32_t hashMod = _calcMod(node->_hashCode);
+ ZoneHashNode* next = _data[hashMod];
+
+ node->_hashNext = next;
+ _data[hashMod] = node;
+
+ if (++_size > _bucketsGrow) {
+ uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
+ if (primeIndex > _primeIndex)
+ _rehash(allocator, primeIndex);
+ }
+
+ return node;
+}
+
+ZoneHashNode* ZoneHashBase::_remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
+ DebugUtils::unused(allocator);
+ uint32_t hashMod = _calcMod(node->_hashCode);
+
+ ZoneHashNode** pPrev = &_data[hashMod];
+ ZoneHashNode* p = *pPrev;
+
+ while (p) {
+ if (p == node) {
+ *pPrev = p->_hashNext;
+ _size--;
+ return node;
+ }
+
+ pPrev = &p->_hashNext;
+ p = *pPrev;
+ }
+
+ return nullptr;
+}
+
+// ============================================================================
+// [asmjit::ZoneHash - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+struct MyHashNode : public ZoneHashNode {
+ inline MyHashNode(uint32_t key) noexcept
+ : ZoneHashNode(key),
+ _key(key) {}
+
+ uint32_t _key;
+};
+
+struct MyKeyMatcher {
+ inline MyKeyMatcher(uint32_t key) noexcept
+ : _key(key) {}
+
+ inline uint32_t hashCode() const noexcept { return _key; }
+ inline bool matches(const MyHashNode* node) const noexcept { return node->_key == _key; }
+
+ uint32_t _key;
+};
+
+UNIT(zone_hash) {
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
+
+ Zone zone(4096);
+ ZoneAllocator allocator(&zone);
+
+ ZoneHash<MyHashNode> hashTable;
+
+ uint32_t key;
+ INFO("Inserting %u elements to HashTable", unsigned(kCount));
+ for (key = 0; key < kCount; key++) {
+ hashTable.insert(&allocator, zone.newT<MyHashNode>(key));
+ }
+
+ uint32_t count = kCount;
+ INFO("Removing %u elements from HashTable and validating each operation", unsigned(kCount));
+ do {
+ MyHashNode* node;
+
+ for (key = 0; key < count; key++) {
+ node = hashTable.get(MyKeyMatcher(key));
+ EXPECT(node != nullptr);
+ EXPECT(node->_key == key);
+ }
+
+ {
+ count--;
+ node = hashTable.get(MyKeyMatcher(count));
+ hashTable.remove(&allocator, node);
+
+ node = hashTable.get(MyKeyMatcher(count));
+ EXPECT(node == nullptr);
+ }
+ } while (count);
+
+ EXPECT(hashTable.empty());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zonehash.h b/3rdparty/asmjit/src/asmjit/core/zonehash.h
new file mode 100644
index 00000000000..bdc1da76027
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonehash.h
@@ -0,0 +1,217 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONEHASH_H_INCLUDED
+#define ASMJIT_CORE_ZONEHASH_H_INCLUDED
+
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneHashNode]
+// ============================================================================
+
+//! Node used by `ZoneHash<>` template.
+//!
+//! Keys passed to `ZoneHash::get()` must provide `uint32_t hashCode()` and
+//! `bool matches(const NodeT*)` for lookups to work.
+class ZoneHashNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHashNode)
+
+ inline ZoneHashNode(uint32_t hashCode = 0) noexcept
+ : _hashNext(nullptr),
+ _hashCode(hashCode),
+ _customData(0) {}
+
+ //! Next node in the chain, null if it terminates the chain.
+ ZoneHashNode* _hashNext;
+ //! Precalculated hash-code of key.
+ uint32_t _hashCode;
+ //! Padding, can be reused by any Node that inherits `ZoneHashNode`.
+ uint32_t _customData;
+};
+
+// ============================================================================
+// [asmjit::ZoneHashBase]
+// ============================================================================
+
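+//! Base class used by `ZoneHash<NodeT>`. It implements the non-template part
+//! of the hash table: bucket storage, the growth threshold, and rehashing.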
+class ZoneHashBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHashBase)
+
+ //! Buckets data.
+ ZoneHashNode** _data;
+ //! Count of records inserted into the hash table.
+ size_t _size;
+ //! Count of hash buckets.
+ uint32_t _bucketsCount;
+  //! Size threshold at which the buckets array grows (only checked after insertion).
+ uint32_t _bucketsGrow;
+ //! Reciprocal value of `_bucketsCount`.
+ uint32_t _rcpValue;
+ //! How many bits to shift right when hash is multiplied with `_rcpValue`.
+ uint8_t _rcpShift;
+ //! Prime value index in internal prime array.
+ uint8_t _primeIndex;
+
+ //! Embedded data, used by empty hash tables.
+ ZoneHashNode* _embedded[1];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneHashBase() noexcept {
+ reset();
+ }
+
+ inline ZoneHashBase(ZoneHashBase&& other) noexcept {
+ _data = other._data;
+ _size = other._size;
+ _bucketsCount = other._bucketsCount;
+ _bucketsGrow = other._bucketsGrow;
+ _rcpValue = other._rcpValue;
+ _rcpShift = other._rcpShift;
+ _primeIndex = other._primeIndex;
+ _embedded[0] = other._embedded[0];
+
+ if (_data == other._embedded) _data = _embedded;
+ }
+
+ inline void reset() noexcept {
+ _data = _embedded;
+ _size = 0;
+ _bucketsCount = 1;
+ _bucketsGrow = 1;
+ _rcpValue = 1;
+ _rcpShift = 0;
+ _primeIndex = 0;
+ _embedded[0] = nullptr;
+ }
+
+ inline void release(ZoneAllocator* allocator) noexcept {
+ ZoneHashNode** oldData = _data;
+ if (oldData != _embedded)
+ allocator->release(oldData, _bucketsCount * sizeof(ZoneHashNode*));
+ reset();
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _size == 0; }
+ inline size_t size() const noexcept { return _size; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void _swap(ZoneHashBase& other) noexcept {
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ std::swap(_bucketsCount, other._bucketsCount);
+ std::swap(_bucketsGrow, other._bucketsGrow);
+ std::swap(_rcpValue, other._rcpValue);
+ std::swap(_rcpShift, other._rcpShift);
+ std::swap(_primeIndex, other._primeIndex);
+ std::swap(_embedded[0], other._embedded[0]);
+
+ if (_data == other._embedded) _data = _embedded;
+ if (other._data == _embedded) other._data = other._embedded;
+ }
+
+ //! \cond INTERNAL
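+  //! Computes `hash % _bucketsCount` without a division: `_rcpValue` and
+  //! `_rcpShift` are precomputed so that `(hash * _rcpValue) >> _rcpShift`
+  //! yields the quotient `hash / _bucketsCount`, and the remainder follows
+  //! by subtracting `quotient * _bucketsCount` from `hash`.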
+ inline uint32_t _calcMod(uint32_t hash) const noexcept {
+ uint32_t x = uint32_t((uint64_t(hash) * _rcpValue) >> _rcpShift);
+ return hash - x * _bucketsCount;
+ }
+
+  ASMJIT_API void _rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept;
+ ASMJIT_API ZoneHashNode* _insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
+ ASMJIT_API ZoneHashNode* _remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept;
+ //! \endcond
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneHash]
+// ============================================================================
+
+//! Low-level intrusive hash table, templated by the node type.
+//!
+//! This hash table allows duplicates to be inserted; the API is low-level
+//! enough that allowing them or not is up to the caller. Typically you would
+//! first `get()` the node and then either modify it or insert a new node via
+//! `insert()`, depending on the intention.
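+//!
+//! A minimal sketch of a node/key pair (hypothetical types, mirroring the
+//! unit test in zonehash.cpp):
+//!
+//! ```
+//! struct MyNode : public ZoneHashNode {
+//!   inline MyNode(uint32_t key) noexcept : ZoneHashNode(key), _key(key) {}
+//!   uint32_t _key;
+//! };
+//!
+//! struct MyKey {
+//!   inline uint32_t hashCode() const noexcept { return _key; }
+//!   inline bool matches(const MyNode* node) const noexcept { return node->_key == _key; }
+//!   uint32_t _key;
+//! };
+//!
+//! // ZoneHash<MyNode> table;
+//! // table.insert(&allocator, zone.newT<MyNode>(42));
+//! // MyNode* node = table.get(MyKey{42});
+//! ```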
+template<typename NodeT>
+class ZoneHash : public ZoneHashBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneHash<NodeT>)
+
+ typedef NodeT Node;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneHash() noexcept
+ : ZoneHashBase() {}
+
+  inline ZoneHash(ZoneHash&& other) noexcept
+    : ZoneHashBase(std::move(other)) {}
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneHash& other) noexcept { ZoneHashBase::_swap(other); }
+
+ template<typename KeyT>
+ inline NodeT* get(const KeyT& key) const noexcept {
+ uint32_t hashMod = _calcMod(key.hashCode());
+ NodeT* node = static_cast<NodeT*>(_data[hashMod]);
+
+ while (node && !key.matches(node))
+ node = static_cast<NodeT*>(node->_hashNext);
+ return node;
+ }
+
+ inline NodeT* insert(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_insert(allocator, node)); }
+ inline NodeT* remove(ZoneAllocator* allocator, NodeT* node) noexcept { return static_cast<NodeT*>(_remove(allocator, node)); }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONEHASH_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonelist.cpp b/3rdparty/asmjit/src/asmjit/core/zonelist.cpp
new file mode 100644
index 00000000000..3496aa8a2b9
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonelist.cpp
@@ -0,0 +1,182 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/zone.h"
+#include "../core/zonelist.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneList - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+class MyListNode : public ZoneListNode<MyListNode> {};
+
+UNIT(zone_list) {
+ Zone zone(4096);
+ ZoneList<MyListNode> list;
+
+ MyListNode* a = zone.newT<MyListNode>();
+ MyListNode* b = zone.newT<MyListNode>();
+ MyListNode* c = zone.newT<MyListNode>();
+ MyListNode* d = zone.newT<MyListNode>();
+
+ INFO("Append / Unlink");
+
+ // []
+ EXPECT(list.empty() == true);
+
+ // [A]
+ list.append(a);
+ EXPECT(list.empty() == false);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == a);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+
+ // [A, B]
+ list.append(b);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == b);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == b);
+ EXPECT(b->prev() == a);
+ EXPECT(b->next() == nullptr);
+
+ // [A, B, C]
+ list.append(c);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == c);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == b);
+ EXPECT(b->prev() == a);
+ EXPECT(b->next() == c);
+ EXPECT(c->prev() == b);
+ EXPECT(c->next() == nullptr);
+
+ // [B, C]
+ list.unlink(a);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == c);
+ EXPECT(c->prev() == b);
+ EXPECT(c->next() == nullptr);
+
+ // [B]
+ list.unlink(c);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == b);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+ EXPECT(c->prev() == nullptr);
+ EXPECT(c->next() == nullptr);
+
+ // []
+ list.unlink(b);
+ EXPECT(list.empty() == true);
+ EXPECT(list.first() == nullptr);
+ EXPECT(list.last() == nullptr);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+
+ INFO("Prepend / Unlink");
+
+ // [A]
+ list.prepend(a);
+ EXPECT(list.empty() == false);
+ EXPECT(list.first() == a);
+ EXPECT(list.last() == a);
+ EXPECT(a->prev() == nullptr);
+ EXPECT(a->next() == nullptr);
+
+ // [B, A]
+ list.prepend(b);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == a);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == a);
+ EXPECT(a->prev() == b);
+ EXPECT(a->next() == nullptr);
+
+ INFO("InsertAfter / InsertBefore");
+
+ // [B, A, C]
+ list.insertAfter(a, c);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == a);
+ EXPECT(a->prev() == b);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ // [B, D, A, C]
+ list.insertBefore(a, d);
+ EXPECT(list.first() == b);
+ EXPECT(list.last() == c);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == d);
+ EXPECT(d->prev() == b);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ INFO("PopFirst / Pop");
+
+ // [D, A, C]
+ EXPECT(list.popFirst() == b);
+ EXPECT(b->prev() == nullptr);
+ EXPECT(b->next() == nullptr);
+
+ EXPECT(list.first() == d);
+ EXPECT(list.last() == c);
+ EXPECT(d->prev() == nullptr);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == c);
+ EXPECT(c->prev() == a);
+ EXPECT(c->next() == nullptr);
+
+ // [D, A]
+ EXPECT(list.pop() == c);
+ EXPECT(c->prev() == nullptr);
+ EXPECT(c->next() == nullptr);
+
+ EXPECT(list.first() == d);
+ EXPECT(list.last() == a);
+ EXPECT(d->prev() == nullptr);
+ EXPECT(d->next() == a);
+ EXPECT(a->prev() == d);
+ EXPECT(a->next() == nullptr);
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zonelist.h b/3rdparty/asmjit/src/asmjit/core/zonelist.h
new file mode 100644
index 00000000000..9d300b03606
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonelist.h
@@ -0,0 +1,203 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONELIST_H_INCLUDED
+#define ASMJIT_CORE_ZONELIST_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneListNode]
+// ============================================================================
+
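+//! Node that can be stored in a `ZoneList<NodeT>`. It keeps its `prev` and
+//! `next` links in a two-element array indexed by `Globals::kLinkPrev` and
+//! `Globals::kLinkNext`.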
+template<typename NodeT>
+class ZoneListNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneListNode)
+
+ NodeT* _listNodes[Globals::kLinkCount];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneListNode() noexcept
+ : _listNodes { nullptr, nullptr } {}
+
+ inline ZoneListNode(ZoneListNode&& other) noexcept
+ : _listNodes { other._listNodes[0], other._listNodes[1] } {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool hasPrev() const noexcept { return _listNodes[Globals::kLinkPrev] != nullptr; }
+ inline bool hasNext() const noexcept { return _listNodes[Globals::kLinkNext] != nullptr; }
+
+ inline NodeT* prev() const noexcept { return _listNodes[Globals::kLinkPrev]; }
+ inline NodeT* next() const noexcept { return _listNodes[Globals::kLinkNext]; }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneList<T>]
+// ============================================================================
+
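+//! Intrusive doubly-linked list of `NodeT` nodes, where `NodeT` must derive
+//! from `ZoneListNode<NodeT>`. The list never owns its nodes; they are
+//! typically allocated from a `Zone`.
+//!
+//! A minimal usage sketch (mirroring the unit test in zonelist.cpp):
+//!
+//! ```
+//! class MyNode : public ZoneListNode<MyNode> {};
+//!
+//! Zone zone(4096);
+//! ZoneList<MyNode> list;
+//!
+//! list.append(zone.newT<MyNode>());
+//! list.prepend(zone.newT<MyNode>());
+//! MyNode* first = list.popFirst();
+//! ```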
+template <typename NodeT>
+class ZoneList {
+public:
+ ASMJIT_NONCOPYABLE(ZoneList)
+
+ NodeT* _bounds[Globals::kLinkCount];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneList() noexcept
+ : _bounds { nullptr, nullptr } {}
+
+ inline ZoneList(ZoneList&& other) noexcept
+ : _bounds { other._bounds[0], other._bounds[1] } {}
+
+ inline void reset() noexcept {
+ _bounds[0] = nullptr;
+ _bounds[1] = nullptr;
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _bounds[0] == nullptr; }
+ inline NodeT* first() const noexcept { return _bounds[Globals::kLinkFirst]; }
+ inline NodeT* last() const noexcept { return _bounds[Globals::kLinkLast]; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneList& other) noexcept {
+ std::swap(_bounds[0], other._bounds[0]);
+ std::swap(_bounds[1], other._bounds[1]);
+ }
+
+  // Can be used to both prepend (dir == kLinkFirst) and append (dir == kLinkLast).
+ inline void _addNode(NodeT* node, size_t dir) noexcept {
+ NodeT* prev = _bounds[dir];
+
+ node->_listNodes[!dir] = prev;
+ _bounds[dir] = node;
+ if (prev)
+ prev->_listNodes[dir] = node;
+ else
+ _bounds[!dir] = node;
+ }
+
+  // Can be used to both insert after (dir == kLinkNext) and insert before
+  // (dir == kLinkPrev) the given `ref` node.
+ inline void _insertNode(NodeT* ref, NodeT* node, size_t dir) noexcept {
+ ASMJIT_ASSERT(ref != nullptr);
+
+ NodeT* prev = ref;
+ NodeT* next = ref->_listNodes[dir];
+
+ prev->_listNodes[dir] = node;
+ if (next)
+ next->_listNodes[!dir] = node;
+ else
+ _bounds[dir] = node;
+
+ node->_listNodes[!dir] = prev;
+ node->_listNodes[ dir] = next;
+ }
+
+ inline void append(NodeT* node) noexcept { _addNode(node, Globals::kLinkLast); }
+ inline void prepend(NodeT* node) noexcept { _addNode(node, Globals::kLinkFirst); }
+
+ inline void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkNext); }
+ inline void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkPrev); }
+
+  inline NodeT* unlink(NodeT* node) noexcept {
+    NodeT* prev = node->prev();
+    NodeT* next = node->next();
+
+    if (prev) prev->_listNodes[1] = next; else _bounds[0] = next;
+    if (next) next->_listNodes[0] = prev; else _bounds[1] = prev;
+
+    node->_listNodes[0] = nullptr;
+    node->_listNodes[1] = nullptr;
+
+    return node;
+  }
+
+ inline NodeT* popFirst() noexcept {
+ NodeT* node = _bounds[0];
+ ASMJIT_ASSERT(node != nullptr);
+
+ NodeT* next = node->next();
+ _bounds[0] = next;
+
+ if (next) {
+ next->_listNodes[0] = nullptr;
+ node->_listNodes[1] = nullptr;
+ }
+ else {
+ _bounds[1] = nullptr;
+ }
+
+ return node;
+ }
+
+ inline NodeT* pop() noexcept {
+ NodeT* node = _bounds[1];
+ ASMJIT_ASSERT(node != nullptr);
+
+ NodeT* prev = node->prev();
+ _bounds[1] = prev;
+
+ if (prev) {
+ prev->_listNodes[1] = nullptr;
+ node->_listNodes[0] = nullptr;
+ }
+ else {
+ _bounds[0] = nullptr;
+ }
+
+ return node;
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONELIST_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonestack.cpp b/3rdparty/asmjit/src/asmjit/core/zonestack.cpp
new file mode 100644
index 00000000000..52841b5d396
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonestack.cpp
@@ -0,0 +1,197 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/zone.h"
+#include "../core/zonestack.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneStackBase - Init / Reset]
+// ============================================================================
+
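+// Initializes or resets the container: releases all blocks owned through the
+// previous allocator (if any) and, when a new `allocator` is passed, allocates
+// a single block with both cursors centered at `middleIndex`.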
+Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcept {
+ ZoneAllocator* oldAllocator = _allocator;
+
+ if (oldAllocator) {
+ Block* block = _block[Globals::kLinkFirst];
+ while (block) {
+ Block* next = block->next();
+ oldAllocator->release(block, kBlockSize);
+ block = next;
+ }
+
+ _allocator = nullptr;
+ _block[Globals::kLinkLeft] = nullptr;
+ _block[Globals::kLinkRight] = nullptr;
+ }
+
+ if (allocator) {
+ Block* block = static_cast<Block*>(allocator->alloc(kBlockSize));
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ block->_link[Globals::kLinkLeft] = nullptr;
+ block->_link[Globals::kLinkRight] = nullptr;
+ block->_start = (uint8_t*)block + middleIndex;
+ block->_end = (uint8_t*)block + middleIndex;
+
+ _allocator = allocator;
+ _block[Globals::kLinkLeft] = block;
+ _block[Globals::kLinkRight] = block;
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ZoneStackBase - Ops]
+// ============================================================================
+
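+// Allocates and links a fresh block on the given `side` (Globals::kLinkFirst
+// when prepending, Globals::kLinkLast when appending). Both cursors start at
+// `initialIndex`: at the end of the block for prepending, at the start for
+// appending, so the new block fills up from that point.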
+Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+
+ Block* prev = _block[side];
+ ASMJIT_ASSERT(!prev->empty());
+
+ Block* block = _allocator->allocT<Block>(kBlockSize);
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ block->_link[ side] = nullptr;
+ block->_link[!side] = prev;
+ block->_start = (uint8_t*)block + initialIndex;
+ block->_end = (uint8_t*)block + initialIndex;
+
+ prev->_link[side] = block;
+ _block[side] = block;
+
+ return kErrorOk;
+}
+
+void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
+ Block* block = _block[side];
+ ASMJIT_ASSERT(block->empty());
+
+ Block* prev = block->_link[!side];
+ if (prev) {
+ ASMJIT_ASSERT(prev->_link[side] == block);
+ _allocator->release(block, kBlockSize);
+
+ prev->_link[side] = nullptr;
+ _block[side] = prev;
+ }
+ else if (_block[!side] == block) {
+    // If the container becomes empty, center both pointers in the remaining block.
+ block->_start = (uint8_t*)block + middleIndex;
+ block->_end = (uint8_t*)block + middleIndex;
+ }
+}
+
+// ============================================================================
+// [asmjit::ZoneStack - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+template<typename T>
+static void test_zone_stack(ZoneAllocator* allocator, const char* typeName) {
+ ZoneStack<T> stack;
+
+ INFO("Testing ZoneStack<%s>", typeName);
+ INFO(" (%d items per one Block)", ZoneStack<T>::kNumBlockItems);
+
+ EXPECT(stack.init(allocator) == kErrorOk);
+ EXPECT(stack.empty(), "Stack must be empty after `init()`");
+
+ EXPECT(stack.append(42) == kErrorOk);
+ EXPECT(!stack.empty() , "Stack must not be empty after an item has been appended");
+ EXPECT(stack.pop() == 42 , "Stack.pop() must return the item that has been appended last");
+ EXPECT(stack.empty() , "Stack must be empty after the last item has been removed");
+
+ EXPECT(stack.prepend(43) == kErrorOk);
+ EXPECT(!stack.empty() , "Stack must not be empty after an item has been prepended");
+ EXPECT(stack.popFirst() == 43, "Stack.popFirst() must return the item that has been prepended last");
+ EXPECT(stack.empty() , "Stack must be empty after the last item has been removed");
+
+ int i;
+  int iMin = -100000;
+ int iMax = 100000;
+
+ INFO("Validating prepend() & popFirst()");
+ for (i = iMax; i >= 0; i--) stack.prepend(T(i));
+ for (i = 0; i <= iMax; i++) {
+ T item = stack.popFirst();
+ EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+ if (!stack.empty()) {
+ item = stack.popFirst();
+ EXPECT(i + 1 == item, "Item '%d' didn't match the item '%lld' popped", i + 1, (long long)item);
+ stack.prepend(item);
+ }
+ }
+ EXPECT(stack.empty());
+
+ INFO("Validating append() & pop()");
+ for (i = 0; i <= iMax; i++) stack.append(T(i));
+ for (i = iMax; i >= 0; i--) {
+ T item = stack.pop();
+ EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+ if (!stack.empty()) {
+ item = stack.pop();
+ EXPECT(i - 1 == item, "Item '%d' didn't match the item '%lld' popped", i - 1, (long long)item);
+ stack.append(item);
+ }
+ }
+ EXPECT(stack.empty());
+
+ INFO("Validating append()/prepend() & popFirst()");
+ for (i = 1; i <= iMax; i++) stack.append(T(i));
+ for (i = 0; i >= iMin; i--) stack.prepend(T(i));
+
+ for (i = iMin; i <= iMax; i++) {
+ T item = stack.popFirst();
+ EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+ }
+ EXPECT(stack.empty());
+
+ INFO("Validating append()/prepend() & pop()");
+ for (i = 0; i >= iMin; i--) stack.prepend(T(i));
+ for (i = 1; i <= iMax; i++) stack.append(T(i));
+
+ for (i = iMax; i >= iMin; i--) {
+ T item = stack.pop();
+ EXPECT(i == item, "Item '%d' didn't match the item '%lld' popped", i, (long long)item);
+ }
+ EXPECT(stack.empty());
+}
+
+UNIT(zone_stack) {
+ Zone zone(8096 - Zone::kBlockOverhead);
+ ZoneAllocator allocator(&zone);
+
+ test_zone_stack<int>(&allocator, "int");
+ test_zone_stack<int64_t>(&allocator, "int64_t");
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zonestack.h b/3rdparty/asmjit/src/asmjit/core/zonestack.h
new file mode 100644
index 00000000000..3c7b5ce0c45
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonestack.h
@@ -0,0 +1,234 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONESTACK_H_INCLUDED
+#define ASMJIT_CORE_ZONESTACK_H_INCLUDED
+
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneStackBase]
+// ============================================================================
+
+//! Base class used by `ZoneStack<T>`.
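+//!
+//! Items are stored in fixed-size blocks (`kBlockSize` bytes) linked in both
+//! directions, so items can be pushed and popped at either end without moving
+//! the data already stored.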
+class ZoneStackBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneStackBase)
+
+ static constexpr uint32_t kBlockSize = ZoneAllocator::kHiMaxSize;
+
+ struct Block {
+ inline bool empty() const noexcept { return _start == _end; }
+ inline Block* prev() const noexcept { return _link[Globals::kLinkLeft]; }
+ inline Block* next() const noexcept { return _link[Globals::kLinkRight]; }
+
+ inline void setPrev(Block* block) noexcept { _link[Globals::kLinkLeft] = block; }
+ inline void setNext(Block* block) noexcept { _link[Globals::kLinkRight] = block; }
+
+ template<typename T>
+ inline T* start() const noexcept { return static_cast<T*>(_start); }
+ template<typename T>
+ inline void setStart(T* start) noexcept { _start = static_cast<void*>(start); }
+
+ template<typename T>
+ inline T* end() const noexcept { return (T*)_end; }
+ template<typename T>
+ inline void setEnd(T* end) noexcept { _end = (void*)end; }
+
+ template<typename T>
+ inline T* data() const noexcept { return (T*)((uint8_t*)(this) + sizeof(Block)); }
+
+ template<typename T>
+ inline bool canPrepend() const noexcept { return _start > data<void>(); }
+
+ template<typename T>
+ inline bool canAppend() const noexcept {
+ size_t kNumBlockItems = (kBlockSize - sizeof(Block)) / sizeof(T);
+ size_t kStartBlockIndex = sizeof(Block);
+ size_t kEndBlockIndex = kStartBlockIndex + kNumBlockItems * sizeof(T);
+
+ return (uintptr_t)_end <= ((uintptr_t)this + kEndBlockIndex - sizeof(T));
+ }
+
+ Block* _link[Globals::kLinkCount]; //!< Next and previous blocks.
+ void* _start; //!< Pointer to the start of the array.
+ void* _end; //!< Pointer to the end of the array.
+ };
+
+ //! Allocator used to allocate data.
+ ZoneAllocator* _allocator;
+ //! First and last blocks.
+ Block* _block[Globals::kLinkCount];
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline ZoneStackBase() noexcept {
+ _allocator = nullptr;
+ _block[0] = nullptr;
+ _block[1] = nullptr;
+ }
+ inline ~ZoneStackBase() noexcept { reset(); }
+
+ inline bool isInitialized() const noexcept { return _allocator != nullptr; }
+ ASMJIT_API Error _init(ZoneAllocator* allocator, size_t middleIndex) noexcept;
+ inline Error reset() noexcept { return _init(nullptr, 0); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns `ZoneAllocator` attached to this container.
+ inline ZoneAllocator* allocator() const noexcept { return _allocator; }
+
+ inline bool empty() const noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ return _block[0]->start<void>() == _block[1]->end<void>();
+ }
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+ ASMJIT_API Error _prepareBlock(uint32_t side, size_t initialIndex) noexcept;
+ ASMJIT_API void _cleanupBlock(uint32_t side, size_t middleIndex) noexcept;
+
+ //! \}
+ //! \endcond
+};
+
+// ============================================================================
+// [asmjit::ZoneStack<T>]
+// ============================================================================
+
+//! Zone-allocated stack container that can push and pop items at both ends.
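+//!
+//! A minimal usage sketch (illustrative only):
+//!
+//! ```
+//! Zone zone(4096);
+//! ZoneAllocator allocator(&zone);
+//!
+//! ZoneStack<int> stack;
+//! if (stack.init(&allocator) == kErrorOk) {
+//!   stack.append(1);
+//!   stack.prepend(0);
+//!   int last = stack.pop();       // 1
+//!   int first = stack.popFirst(); // 0
+//! }
+//! ```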
+template<typename T>
+class ZoneStack : public ZoneStackBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneStack<T>)
+
+ enum : uint32_t {
+ kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)),
+ kStartBlockIndex = uint32_t(sizeof(Block)),
+ kMidBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems / 2) * sizeof(T)),
+ kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T))
+ };
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline ZoneStack() noexcept {}
+ inline ~ZoneStack() noexcept {}
+
+ inline Error init(ZoneAllocator* allocator) noexcept { return _init(allocator, kMidBlockIndex); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ ASMJIT_INLINE Error prepend(T item) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ Block* block = _block[Globals::kLinkFirst];
+
+ if (!block->canPrepend<T>()) {
+ ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkFirst, kEndBlockIndex));
+ block = _block[Globals::kLinkFirst];
+ }
+
+ T* ptr = block->start<T>() - 1;
+ ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
+ *ptr = item;
+ block->setStart<T>(ptr);
+ return kErrorOk;
+ }
+
+ ASMJIT_INLINE Error append(T item) noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ Block* block = _block[Globals::kLinkLast];
+
+ if (!block->canAppend<T>()) {
+ ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkLast, kStartBlockIndex));
+ block = _block[Globals::kLinkLast];
+ }
+
+ T* ptr = block->end<T>();
+ ASMJIT_ASSERT(ptr >= block->data<T>() && ptr <= block->data<T>() + (kNumBlockItems - 1));
+
+ *ptr++ = item;
+ block->setEnd(ptr);
+ return kErrorOk;
+ }
+
+ ASMJIT_INLINE T popFirst() noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ ASMJIT_ASSERT(!empty());
+
+ Block* block = _block[Globals::kLinkFirst];
+ ASMJIT_ASSERT(!block->empty());
+
+ T* ptr = block->start<T>();
+ T item = *ptr++;
+
+ block->setStart(ptr);
+ if (block->empty())
+ _cleanupBlock(Globals::kLinkFirst, kMidBlockIndex);
+
+ return item;
+ }
+
+ ASMJIT_INLINE T pop() noexcept {
+ ASMJIT_ASSERT(isInitialized());
+ ASMJIT_ASSERT(!empty());
+
+ Block* block = _block[Globals::kLinkLast];
+ ASMJIT_ASSERT(!block->empty());
+
+ T* ptr = block->end<T>();
+ T item = *--ptr;
+ ASMJIT_ASSERT(ptr >= block->data<T>());
+ ASMJIT_ASSERT(ptr >= block->start<T>());
+
+ block->setEnd(ptr);
+ if (block->empty())
+ _cleanupBlock(Globals::kLinkLast, kMidBlockIndex);
+
+ return item;
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONESTACK_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonestring.h b/3rdparty/asmjit/src/asmjit/core/zonestring.h
new file mode 100644
index 00000000000..dfb06b4c754
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonestring.h
@@ -0,0 +1,125 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONESTRING_H_INCLUDED
+#define ASMJIT_CORE_ZONESTRING_H_INCLUDED
+
+#include "../core/globals.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneStringBase]
+// ============================================================================
+
+struct ZoneStringBase {
+ union {
+ struct {
+ uint32_t _size;
+ char _embedded[sizeof(void*) * 2 - 4];
+ };
+ struct {
+ void* _dummy;
+ char* _external;
+ };
+ };
+
+ inline void reset() noexcept {
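+    // `_dummy` overlaps `_size` and the start of `_embedded`, so zeroing both
+    // pointers clears the whole union.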
+ _dummy = nullptr;
+ _external = nullptr;
+ }
+
+ Error setData(Zone* zone, uint32_t maxEmbeddedSize, const char* str, size_t size) noexcept {
+ if (size == SIZE_MAX)
+ size = strlen(str);
+
+ if (size <= maxEmbeddedSize) {
+ memcpy(_embedded, str, size);
+ _embedded[size] = '\0';
+ }
+ else {
+ char* external = static_cast<char*>(zone->dup(str, size, true));
+ if (ASMJIT_UNLIKELY(!external))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ _external = external;
+ }
+
+ _size = uint32_t(size);
+ return kErrorOk;
+ }
+};
+
+// ============================================================================
+// [asmjit::ZoneString<N>]
+// ============================================================================
+
+//! Small string is a template that helps to create strings that can be either
+//! statically allocated if they are small, or externally allocated in case
+//! their size exceeds the limit. `N` represents the size of the whole
+//! `ZoneString` structure; the maximum size of the internal buffer is derived
+//! from it.
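+//!
+//! A minimal usage sketch (illustrative only, assuming a live `Zone`):
+//!
+//! ```
+//! Zone zone(4096);
+//! ZoneString<16> s;
+//!
+//! // Five characters fit into the embedded buffer (kMaxEmbeddedSize == 11).
+//! s.setData(&zone, "small", SIZE_MAX);
+//!
+//! // Longer content is duplicated into the zone instead.
+//! s.setData(&zone, "a considerably longer string content", SIZE_MAX);
+//! ```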
+template<size_t N>
+class ZoneString {
+public:
+ static constexpr uint32_t kWholeSize =
+ (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
+ static constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
+
+ union {
+ ZoneStringBase _base;
+ char _wholeData[kWholeSize];
+ };
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneString() noexcept { reset(); }
+ inline void reset() noexcept { _base.reset(); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline const char* data() const noexcept { return _base._size <= kMaxEmbeddedSize ? _base._embedded : _base._external; }
+ inline bool empty() const noexcept { return _base._size == 0; }
+ inline uint32_t size() const noexcept { return _base._size; }
+
+ inline bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
+
+ inline Error setData(Zone* zone, const char* data, size_t size) noexcept {
+ return _base.setData(zone, kMaxEmbeddedSize, data, size);
+ }
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONESTRING_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonetree.cpp b/3rdparty/asmjit/src/asmjit/core/zonetree.cpp
new file mode 100644
index 00000000000..a16f0928f8d
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonetree.cpp
@@ -0,0 +1,118 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonetree.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneTree - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+template<typename NodeT>
+struct ZoneRBUnit {
+ typedef ZoneTree<NodeT> Tree;
+
+ static void verifyTree(Tree& tree) noexcept {
+ EXPECT(checkHeight(static_cast<NodeT*>(tree._root)) > 0);
+ }
+
+ // Check whether the Red-Black tree is valid.
+ static int checkHeight(NodeT* node) noexcept {
+ if (!node) return 1;
+
+ NodeT* ln = node->left();
+ NodeT* rn = node->right();
+
+ // Invalid tree.
+ EXPECT(ln == nullptr || *ln < *node);
+ EXPECT(rn == nullptr || *rn > *node);
+
+ // Red violation.
+ EXPECT(!node->isRed() ||
+ (!ZoneTreeNode::_isValidRed(ln) && !ZoneTreeNode::_isValidRed(rn)));
+
+ // Black violation.
+ int lh = checkHeight(ln);
+ int rh = checkHeight(rn);
+ EXPECT(!lh || !rh || lh == rh);
+
+ // Only count black links.
+ return (lh && rh) ? lh + !node->isRed() : 0;
+ }
+};
+
+class MyRBNode : public ZoneTreeNodeT<MyRBNode> {
+public:
+ ASMJIT_NONCOPYABLE(MyRBNode)
+
+ inline explicit MyRBNode(uint32_t key) noexcept
+ : _key(key) {}
+
+ inline bool operator<(const MyRBNode& other) const noexcept { return _key < other._key; }
+ inline bool operator>(const MyRBNode& other) const noexcept { return _key > other._key; }
+
+ inline bool operator<(uint32_t queryKey) const noexcept { return _key < queryKey; }
+ inline bool operator>(uint32_t queryKey) const noexcept { return _key > queryKey; }
+
+ uint32_t _key;
+};
+
+UNIT(zone_rbtree) {
+ uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
+
+ Zone zone(4096);
+ ZoneTree<MyRBNode> rbTree;
+
+ uint32_t key;
+ INFO("Inserting %u elements to RBTree and validating each operation", unsigned(kCount));
+ for (key = 0; key < kCount; key++) {
+ rbTree.insert(zone.newT<MyRBNode>(key));
+ ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
+ }
+
+ uint32_t count = kCount;
+ INFO("Removing %u elements from RBTree and validating each operation", unsigned(kCount));
+ do {
+ MyRBNode* node;
+
+ for (key = 0; key < count; key++) {
+ node = rbTree.get(key);
+ EXPECT(node != nullptr);
+ EXPECT(node->_key == key);
+ }
+
+ node = rbTree.get(--count);
+ rbTree.remove(node);
+ ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
+ } while (count);
+
+ EXPECT(rbTree.empty());
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zonetree.h b/3rdparty/asmjit/src/asmjit/core/zonetree.h
new file mode 100644
index 00000000000..6cb88ed499b
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonetree.h
@@ -0,0 +1,385 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONETREE_H_INCLUDED
+#define ASMJIT_CORE_ZONETREE_H_INCLUDED
+
+#include "../core/support.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneTreeNode]
+// ============================================================================
+
+//! RB-Tree node.
+//!
+//! The color is stored in the least significant bit of the `left` node pointer.
+//!
+//! WARNING: Always use accessors to access left and right children.
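+//!
+//! A sketch of the packing (illustrative only; see `kRedMask` below):
+//!
+//! ```
+//! _rbNodeData[0] = uintptr_t(leftChild) | uintptr_t(isRed); // Color in bit 0.
+//! _rbNodeData[1] = uintptr_t(rightChild);                   // No color bit here.
+//! ```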
+class ZoneTreeNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneTreeNode)
+
+ enum : uintptr_t {
+ kRedMask = 0x1,
+ kPtrMask = ~kRedMask
+ };
+
+ uintptr_t _rbNodeData[Globals::kLinkCount];
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneTreeNode() noexcept
+ : _rbNodeData { 0, 0 } {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool isRed() const noexcept { return static_cast<bool>(_rbNodeData[0] & kRedMask); }
+
+ inline bool hasChild(size_t i) const noexcept { return _rbNodeData[i] > kRedMask; }
+ inline bool hasLeft() const noexcept { return _rbNodeData[0] > kRedMask; }
+ inline bool hasRight() const noexcept { return _rbNodeData[1] != 0; }
+
+ template<typename T = ZoneTreeNode>
+ inline T* child(size_t i) const noexcept { return static_cast<T*>(_getChild(i)); }
+ template<typename T = ZoneTreeNode>
+ inline T* left() const noexcept { return static_cast<T*>(_getLeft()); }
+ template<typename T = ZoneTreeNode>
+ inline T* right() const noexcept { return static_cast<T*>(_getRight()); }
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+ inline ZoneTreeNode* _getChild(size_t i) const noexcept { return (ZoneTreeNode*)(_rbNodeData[i] & kPtrMask); }
+ inline ZoneTreeNode* _getLeft() const noexcept { return (ZoneTreeNode*)(_rbNodeData[0] & kPtrMask); }
+ inline ZoneTreeNode* _getRight() const noexcept { return (ZoneTreeNode*)(_rbNodeData[1]); }
+
+ inline void _setChild(size_t i, ZoneTreeNode* node) noexcept { _rbNodeData[i] = (_rbNodeData[i] & kRedMask) | (uintptr_t)node; }
+ inline void _setLeft(ZoneTreeNode* node) noexcept { _rbNodeData[0] = (_rbNodeData[0] & kRedMask) | (uintptr_t)node; }
+ inline void _setRight(ZoneTreeNode* node) noexcept { _rbNodeData[1] = (uintptr_t)node; }
+
+ inline void _makeRed() noexcept { _rbNodeData[0] |= kRedMask; }
+ inline void _makeBlack() noexcept { _rbNodeData[0] &= kPtrMask; }
+
+ //! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
+ static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return node && node->isRed(); }
+
+ //! \}
+ //! \endcond
+};
+
+//! RB-Tree typed to `NodeT`.
+template<typename NodeT>
+class ZoneTreeNodeT : public ZoneTreeNode {
+public:
+ ASMJIT_NONCOPYABLE(ZoneTreeNodeT)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneTreeNodeT() noexcept
+ : ZoneTreeNode() {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_getChild(i)); }
+ inline NodeT* left() const noexcept { return static_cast<NodeT*>(_getLeft()); }
+ inline NodeT* right() const noexcept { return static_cast<NodeT*>(_getRight()); }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneTree]
+// ============================================================================
+
+//! RB-Tree.
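+//!
+//! A minimal usage sketch (illustrative only; `MyRBNode` mirrors the node
+//! type defined by the unit test in `zonetree.cpp`):
+//!
+//! ```
+//! Zone zone(4096);
+//! ZoneTree<MyRBNode> tree;
+//!
+//! tree.insert(zone.newT<MyRBNode>(1u));
+//! tree.insert(zone.newT<MyRBNode>(2u));
+//!
+//! MyRBNode* node = tree.get(1u); // Lookup via MyRBNode's comparison operators.
+//! tree.remove(node);
+//! ```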
+template<typename NodeT>
+class ZoneTree {
+public:
+ ASMJIT_NONCOPYABLE(ZoneTree)
+
+ typedef NodeT Node;
+ NodeT* _root;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneTree() noexcept
+ : _root(nullptr) {}
+
+ inline ZoneTree(ZoneTree&& other) noexcept
+ : _root(other._root) {}
+
+ inline void reset() noexcept { _root = nullptr; }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ inline bool empty() const noexcept { return _root == nullptr; }
+ inline NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneTree& other) noexcept {
+ std::swap(_root, other._root);
+ }
+
+ template<typename CompareT = Support::Compare<Support::kSortAscending>>
+ void insert(NodeT* node, const CompareT& cmp = CompareT()) noexcept {
+ // Node to insert must not contain garbage.
+ ASMJIT_ASSERT(!node->hasLeft());
+ ASMJIT_ASSERT(!node->hasRight());
+ ASMJIT_ASSERT(!node->isRed());
+
+ if (!_root) {
+ _root = node;
+ return;
+ }
+
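+    // Top-down insertion: descend from the false root, color-flipping full
+    // 4-nodes and rotating to fix red violations along the way.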
+ ZoneTreeNode head; // False root node,
+ head._setRight(_root); // having root on the right.
+
+ ZoneTreeNode* g = nullptr; // Grandparent.
+ ZoneTreeNode* p = nullptr; // Parent.
+ ZoneTreeNode* t = &head; // Iterator.
+ ZoneTreeNode* q = _root; // Query.
+
+ size_t dir = 0; // Direction for accessing child nodes.
+ size_t last = 0; // Not needed to initialize, but makes some tools happy.
+
+ node->_makeRed(); // New nodes are always red and violations fixed appropriately.
+
+ // Search down the tree.
+ for (;;) {
+ if (!q) {
+ // Insert new node at the bottom.
+ q = node;
+ p->_setChild(dir, node);
+ }
+ else if (_isValidRed(q->_getLeft()) && _isValidRed(q->_getRight())) {
+ // Color flip.
+ q->_makeRed();
+ q->_getLeft()->_makeBlack();
+ q->_getRight()->_makeBlack();
+ }
+
+ // Fix red violation.
+ if (_isValidRed(q) && _isValidRed(p))
+ t->_setChild(t->_getRight() == g,
+ q == p->_getChild(last) ? _singleRotate(g, !last) : _doubleRotate(g, !last));
+
+ // Stop if found.
+ if (q == node)
+ break;
+
+ last = dir;
+ dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
+
+ // Update helpers.
+ if (g) t = g;
+
+ g = p;
+ p = q;
+ q = q->_getChild(dir);
+ }
+
+ // Update root and make it black.
+ _root = static_cast<NodeT*>(head._getRight());
+ _root->_makeBlack();
+ }
+
+ //! Remove node from RBTree.
+ template<typename CompareT = Support::Compare<Support::kSortAscending>>
+ void remove(ZoneTreeNode* node, const CompareT& cmp = CompareT()) noexcept {
+ ZoneTreeNode head; // False root node,
+ head._setRight(_root); // having root on the right.
+
+ ZoneTreeNode* g = nullptr; // Grandparent.
+ ZoneTreeNode* p = nullptr; // Parent.
+ ZoneTreeNode* q = &head; // Query.
+
+ ZoneTreeNode* f = nullptr; // Found item.
+ ZoneTreeNode* gf = nullptr; // Found grandparent.
+ size_t dir = 1; // Direction (0 or 1).
+
+ // Search and push a red down.
+ while (q->hasChild(dir)) {
+ size_t last = dir;
+
+ // Update helpers.
+ g = p;
+ p = q;
+ q = q->_getChild(dir);
+ dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
+
+ // Save found node.
+ if (q == node) {
+ f = q;
+ gf = g;
+ }
+
+ // Push the red node down.
+ if (!_isValidRed(q) && !_isValidRed(q->_getChild(dir))) {
+ if (_isValidRed(q->_getChild(!dir))) {
+ ZoneTreeNode* child = _singleRotate(q, dir);
+ p->_setChild(last, child);
+ p = child;
+ }
+ else if (!_isValidRed(q->_getChild(!dir)) && p->_getChild(!last)) {
+ ZoneTreeNode* s = p->_getChild(!last);
+ if (!_isValidRed(s->_getChild(!last)) && !_isValidRed(s->_getChild(last))) {
+ // Color flip.
+ p->_makeBlack();
+ s->_makeRed();
+ q->_makeRed();
+ }
+ else {
+ size_t dir2 = g->_getRight() == p;
+ ZoneTreeNode* child = g->_getChild(dir2);
+
+ if (_isValidRed(s->_getChild(last))) {
+ child = _doubleRotate(p, last);
+ g->_setChild(dir2, child);
+ }
+ else if (_isValidRed(s->_getChild(!last))) {
+ child = _singleRotate(p, last);
+ g->_setChild(dir2, child);
+ }
+
+ // Ensure correct coloring.
+ q->_makeRed();
+ child->_makeRed();
+ child->_getLeft()->_makeBlack();
+ child->_getRight()->_makeBlack();
+ }
+ }
+ }
+ }
+
+ // Replace and remove.
+ ASMJIT_ASSERT(f != nullptr);
+ ASMJIT_ASSERT(f != &head);
+ ASMJIT_ASSERT(q != &head);
+
+ p->_setChild(p->_getRight() == q,
+ q->_getChild(q->_getLeft() == nullptr));
+
+ // NOTE: The original algorithm used a trick to just copy 'key/value' to
+ // `f` and mark `q` for deletion. But this is unacceptable here as we
+ // really want to destroy the passed `node`. So, we have to make sure that
+ // we have really removed `f` and not `q`.
+ if (f != q) {
+ ASMJIT_ASSERT(f != &head);
+ ASMJIT_ASSERT(f != gf);
+
+ ZoneTreeNode* n = gf ? gf : &head;
+ dir = (n == &head) ? 1 : cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
+
+ for (;;) {
+ if (n->_getChild(dir) == f) {
+ n->_setChild(dir, q);
+ // RAW copy, including the color.
+ q->_rbNodeData[0] = f->_rbNodeData[0];
+ q->_rbNodeData[1] = f->_rbNodeData[1];
+ break;
+ }
+
+ n = n->_getChild(dir);
+
+        // Cannot be null as we know the traversal must reach `f` within a few iterations.
+ ASMJIT_ASSERT(n != nullptr);
+ dir = cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
+ }
+ }
+
+ // Update root and make it black.
+ _root = static_cast<NodeT*>(head._getRight());
+ if (_root) _root->_makeBlack();
+ }
+
+ template<typename KeyT, typename CompareT = Support::Compare<Support::kSortAscending>>
+ ASMJIT_INLINE NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
+ ZoneTreeNode* node = _root;
+ while (node) {
+ auto result = cmp(*static_cast<const NodeT*>(node), key);
+ if (result == 0) break;
+
+ // Go left or right depending on the `result`.
+ node = node->_getChild(result < 0);
+ }
+ return static_cast<NodeT*>(node);
+ }
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+ static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return ZoneTreeNode::_isValidRed(node); }
+
+ //! Single rotation.
+ static ASMJIT_INLINE ZoneTreeNode* _singleRotate(ZoneTreeNode* root, size_t dir) noexcept {
+ ZoneTreeNode* save = root->_getChild(!dir);
+ root->_setChild(!dir, save->_getChild(dir));
+ save->_setChild( dir, root);
+ root->_makeRed();
+ save->_makeBlack();
+ return save;
+ }
+
+ //! Double rotation.
+ static ASMJIT_INLINE ZoneTreeNode* _doubleRotate(ZoneTreeNode* root, size_t dir) noexcept {
+ root->_setChild(!dir, _singleRotate(root->_getChild(!dir), !dir));
+ return _singleRotate(root, dir);
+ }
+
+ //! \}
+ //! \endcond
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONETREE_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/core/zonevector.cpp b/3rdparty/asmjit/src/asmjit/core/zonevector.cpp
new file mode 100644
index 00000000000..7ab53bf3547
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonevector.cpp
@@ -0,0 +1,375 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#include "../core/support.h"
+#include "../core/zone.h"
+#include "../core/zonevector.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+// ============================================================================
+// [asmjit::ZoneVectorBase - Helpers]
+// ============================================================================
+
+Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+ uint32_t threshold = Globals::kGrowThreshold / sizeOfT;
+ uint32_t capacity = _capacity;
+ uint32_t after = _size;
+
+ if (ASMJIT_UNLIKELY(std::numeric_limits<uint32_t>::max() - n < after))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ after += n;
+ if (capacity >= after)
+ return kErrorOk;
+
+ // ZoneVector is used as an array to hold short-lived data structures used
+ // during code generation. The growing strategy is simple - use small capacity
+ // at the beginning (very good for ZoneAllocator) and then grow quicker to
+ // prevent successive reallocations.
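+  // For example, a vector at capacity 256 that needs one more item jumps to
+  // 512, 1024, ... (doubling) until `threshold` is reached, after which it
+  // grows linearly by `threshold` items at a time.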
+ if (capacity < 4)
+ capacity = 4;
+ else if (capacity < 8)
+ capacity = 8;
+ else if (capacity < 16)
+ capacity = 16;
+ else if (capacity < 64)
+ capacity = 64;
+ else if (capacity < 256)
+ capacity = 256;
+
+ while (capacity < after) {
+ if (capacity < threshold)
+ capacity *= 2;
+ else
+ capacity += threshold;
+ }
+
+ return _reserve(allocator, sizeOfT, capacity);
+}
+
+Error ZoneVectorBase::_reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+ uint32_t oldCapacity = _capacity;
+ if (oldCapacity >= n) return kErrorOk;
+
+ uint32_t nBytes = n * sizeOfT;
+ if (ASMJIT_UNLIKELY(nBytes < n))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ size_t allocatedBytes;
+ uint8_t* newData = static_cast<uint8_t*>(allocator->alloc(nBytes, allocatedBytes));
+
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ void* oldData = _data;
+ if (_size)
+ memcpy(newData, oldData, size_t(_size) * sizeOfT);
+
+ if (oldData)
+ allocator->release(oldData, size_t(oldCapacity) * sizeOfT);
+
+ _capacity = uint32_t(allocatedBytes / sizeOfT);
+ ASMJIT_ASSERT(_capacity >= n);
+
+ _data = newData;
+ return kErrorOk;
+}
+
+Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
+ uint32_t size = _size;
+
+ if (_capacity < n) {
+ ASMJIT_PROPAGATE(_grow(allocator, sizeOfT, n - size));
+ ASMJIT_ASSERT(_capacity >= n);
+ }
+
+ if (size < n)
+ memset(static_cast<uint8_t*>(_data) + size_t(size) * sizeOfT, 0, size_t(n - size) * sizeOfT);
+
+ _size = n;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::ZoneBitVector - Ops]
+// ============================================================================
+
+Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept {
+ BitWord* data = _data;
+ uint32_t newSize = other.size();
+
+ if (!newSize) {
+ _size = 0;
+ return kErrorOk;
+ }
+
+ if (newSize > _capacity) {
+    // Realloc needed... Calculate the minimum capacity (in bytes) required.
+ uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(newSize, kBitWordSizeInBits);
+ if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // Normalize to bytes.
+ uint32_t minimumCapacity = minimumCapacityInBits / 8;
+ size_t allocatedCapacity;
+
+ BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+    // `allocatedCapacity` is in bytes, but we need bits.
+ size_t allocatedCapacityInBits = allocatedCapacity * 8;
+
+ // Arithmetic overflow should normally not happen. If it happens we just
+ // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
+ // this value is still safe to be used to call `_allocator->release(...)`.
+ if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
+ allocatedCapacityInBits = minimumCapacityInBits;
+
+ if (data)
+ allocator->release(data, _capacity / 8);
+ data = newData;
+
+ _data = data;
+ _capacity = uint32_t(allocatedCapacityInBits);
+ }
+
+ _size = newSize;
+ _copyBits(data, other.data(), _wordsPerBits(newSize));
+
+ return kErrorOk;
+}
+
+Error ZoneBitVector::_resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept {
+ ASMJIT_ASSERT(idealCapacity >= newSize);
+
+ if (newSize <= _size) {
+ // The size after the resize is lesser than or equal to the current size.
+ uint32_t idx = newSize / kBitWordSizeInBits;
+ uint32_t bit = newSize % kBitWordSizeInBits;
+
+ // Just set all bits outside of the new size in the last word to zero.
+    // There may be no bits to set if `bit` is zero, which happens when
+    // `newSize` is a multiple of `kBitWordSizeInBits` like 64, 128, and so on.
+    // In that case don't change anything, as that would mean setting bits
+    // outside of `_size`.
+ if (bit)
+ _data[idx] &= (BitWord(1) << bit) - 1u;
+
+ _size = newSize;
+ return kErrorOk;
+ }
+
+ uint32_t oldSize = _size;
+ BitWord* data = _data;
+
+ if (newSize > _capacity) {
+    // Realloc needed, calculate the minimum capacity (in bytes) required.
+ uint32_t minimumCapacityInBits = Support::alignUp<uint32_t>(idealCapacity, kBitWordSizeInBits);
+
+ if (ASMJIT_UNLIKELY(minimumCapacityInBits < newSize))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+ // Normalize to bytes.
+ uint32_t minimumCapacity = minimumCapacityInBits / 8;
+ size_t allocatedCapacity;
+
+ BitWord* newData = static_cast<BitWord*>(allocator->alloc(minimumCapacity, allocatedCapacity));
+ if (ASMJIT_UNLIKELY(!newData))
+ return DebugUtils::errored(kErrorOutOfMemory);
+
+    // `allocatedCapacity` is in bytes, but we need bits.
+ size_t allocatedCapacityInBits = allocatedCapacity * 8;
+
+ // Arithmetic overflow should normally not happen. If it happens we just
+ // change the `allocatedCapacityInBits` to the `minimumCapacityInBits` as
+ // this value is still safe to be used to call `_allocator->release(...)`.
+ if (ASMJIT_UNLIKELY(allocatedCapacityInBits < allocatedCapacity))
+ allocatedCapacityInBits = minimumCapacityInBits;
+
+ _copyBits(newData, data, _wordsPerBits(oldSize));
+
+ if (data)
+ allocator->release(data, _capacity / 8);
+ data = newData;
+
+ _data = data;
+ _capacity = uint32_t(allocatedCapacityInBits);
+ }
+
+ // Start (of the old size) and end (of the new size) bits
+ uint32_t idx = oldSize / kBitWordSizeInBits;
+ uint32_t startBit = oldSize % kBitWordSizeInBits;
+ uint32_t endBit = newSize % kBitWordSizeInBits;
+
+ // Set new bits to either 0 or 1. The `pattern` is used to set multiple
+ // bits per bit-word and contains either all zeros or all ones.
+ BitWord pattern = Support::bitMaskFromBool<BitWord>(newBitsValue);
+
+ // First initialize the last bit-word of the old size.
+ if (startBit) {
+ uint32_t nBits = 0;
+
+ if (idx == (newSize / kBitWordSizeInBits)) {
+ // The number of bit-words is the same after the resize. In that case
+ // we need to set only bits necessary in the current last bit-word.
+ ASMJIT_ASSERT(startBit < endBit);
+ nBits = endBit - startBit;
+ }
+ else {
+      // There will be more bit-words after the resize. In that case we don't
+      // have to be extra careful about the last bit-word of the old size.
+ nBits = kBitWordSizeInBits - startBit;
+ }
+
+    // Set the `nBits` range [startBit, startBit + nBits) in the current word.
+    data[idx++] |= (((BitWord(1) << nBits) - 1u) << startBit) & pattern;
+ }
+
+ // Initialize all bit-words after the last bit-word of the old size.
+ uint32_t endIdx = _wordsPerBits(newSize);
+ while (idx < endIdx) data[idx++] = pattern;
+
+  // Clear unused bits of the last bit-word. Using `&=` keeps bits below
+  // `endBit` intact when the last word was only partially initialized above.
+  if (endBit)
+    data[endIdx - 1] &= (BitWord(1) << endBit) - 1;
+
+ _size = newSize;
+ return kErrorOk;
+}
+
+Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
+ uint32_t kThreshold = Globals::kGrowThreshold * 8;
+ uint32_t newSize = _size + 1;
+ uint32_t idealCapacity = _capacity;
+
+ if (idealCapacity < 128)
+ idealCapacity = 128;
+ else if (idealCapacity <= kThreshold)
+ idealCapacity *= 2;
+ else
+ idealCapacity += kThreshold;
+
+ if (ASMJIT_UNLIKELY(idealCapacity < _capacity)) {
+ if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max()))
+ return DebugUtils::errored(kErrorOutOfMemory);
+ idealCapacity = newSize;
+ }
+
+ return _resize(allocator, newSize, idealCapacity, value);
+}
+
+// ============================================================================
+// [asmjit::ZoneVector / ZoneBitVector - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+template<typename T>
+static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
+ int i;
+ int kMax = 100000;
+
+ ZoneVector<T> vec;
+
+ INFO("ZoneVector<%s> basic tests", typeName);
+ EXPECT(vec.append(allocator, 0) == kErrorOk);
+ EXPECT(vec.empty() == false);
+ EXPECT(vec.size() == 1);
+ EXPECT(vec.capacity() >= 1);
+ EXPECT(vec.indexOf(0) == 0);
+ EXPECT(vec.indexOf(-11) == Globals::kNotFound);
+
+ vec.clear();
+ EXPECT(vec.empty());
+ EXPECT(vec.size() == 0);
+ EXPECT(vec.indexOf(0) == Globals::kNotFound);
+
+ for (i = 0; i < kMax; i++) {
+ EXPECT(vec.append(allocator, T(i)) == kErrorOk);
+ }
+ EXPECT(vec.empty() == false);
+ EXPECT(vec.size() == uint32_t(kMax));
+ EXPECT(vec.indexOf(T(kMax - 1)) == uint32_t(kMax - 1));
+
+ vec.release(allocator);
+}
+
+static void test_zone_bitvector(ZoneAllocator* allocator) {
+ Zone zone(8096 - Zone::kBlockOverhead);
+
+ uint32_t i, count;
+ uint32_t kMaxCount = 100;
+
+ ZoneBitVector vec;
+ EXPECT(vec.empty());
+ EXPECT(vec.size() == 0);
+
+ INFO("ZoneBitVector::resize()");
+ for (count = 1; count < kMaxCount; count++) {
+ vec.clear();
+ EXPECT(vec.resize(allocator, count, false) == kErrorOk);
+ EXPECT(vec.size() == count);
+
+ for (i = 0; i < count; i++)
+ EXPECT(vec.bitAt(i) == false);
+
+ vec.clear();
+ EXPECT(vec.resize(allocator, count, true) == kErrorOk);
+ EXPECT(vec.size() == count);
+
+ for (i = 0; i < count; i++)
+ EXPECT(vec.bitAt(i) == true);
+ }
+
+ INFO("ZoneBitVector::fillBits() / clearBits()");
+ for (count = 1; count < kMaxCount; count += 2) {
+ vec.clear();
+ EXPECT(vec.resize(allocator, count) == kErrorOk);
+ EXPECT(vec.size() == count);
+
+ for (i = 0; i < (count + 1) / 2; i++) {
+ bool value = bool(i & 1);
+ if (value)
+ vec.fillBits(i, count - i * 2);
+ else
+ vec.clearBits(i, count - i * 2);
+ }
+
+ for (i = 0; i < count; i++) {
+ EXPECT(vec.bitAt(i) == bool(i & 1));
+ }
+ }
+}
+
+UNIT(zone_vector) {
+ Zone zone(8096 - Zone::kBlockOverhead);
+ ZoneAllocator allocator(&zone);
+
+ test_zone_vector<int>(&allocator, "int");
+ test_zone_vector<int64_t>(&allocator, "int64_t");
+ test_zone_bitvector(&allocator);
+}
+#endif
+
+ASMJIT_END_NAMESPACE
diff --git a/3rdparty/asmjit/src/asmjit/core/zonevector.h b/3rdparty/asmjit/src/asmjit/core/zonevector.h
new file mode 100644
index 00000000000..7ee04ce7807
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/core/zonevector.h
@@ -0,0 +1,699 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
+#define ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
+
+#include "../core/support.h"
+#include "../core/zone.h"
+
+ASMJIT_BEGIN_NAMESPACE
+
+//! \addtogroup asmjit_zone
+//! \{
+
+// ============================================================================
+// [asmjit::ZoneVectorBase]
+// ============================================================================
+
+//! \cond INTERNAL
+
+//! Base class implementing core `ZoneVector<>` functionality.
+class ZoneVectorBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneVectorBase)
+
+  // STL compatibility.
+ typedef uint32_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ //! Vector data (untyped).
+ void* _data;
+ //! Size of the vector.
+ size_type _size;
+ //! Capacity of the vector.
+ size_type _capacity;
+
+protected:
+ //! \name Construction & Destruction
+ //! \{
+
+ //! Creates a new instance of `ZoneVectorBase`.
+ inline ZoneVectorBase() noexcept
+ : _data(nullptr),
+ _size(0),
+ _capacity(0) {}
+
+ inline ZoneVectorBase(ZoneVectorBase&& other) noexcept
+ : _data(other._data),
+ _size(other._size),
+ _capacity(other._capacity) {}
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+ inline void _release(ZoneAllocator* allocator, uint32_t sizeOfT) noexcept {
+ if (_data != nullptr) {
+ allocator->release(_data, _capacity * sizeOfT);
+ reset();
+ }
+ }
+
+ ASMJIT_API Error _grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+ ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+ ASMJIT_API Error _reserve(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept;
+
+ inline void _swap(ZoneVectorBase& other) noexcept {
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ std::swap(_capacity, other._capacity);
+ }
+
+ //! \}
+
+public:
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the vector is empty.
+ inline bool empty() const noexcept { return _size == 0; }
+ //! Returns the vector size.
+ inline size_type size() const noexcept { return _size; }
+ //! Returns the vector capacity.
+ inline size_type capacity() const noexcept { return _capacity; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Makes the vector empty (won't change the capacity or data pointer).
+ inline void clear() noexcept { _size = 0; }
+ //! Resets the vector data and set its `size` to zero.
+ inline void reset() noexcept {
+ _data = nullptr;
+ _size = 0;
+ _capacity = 0;
+ }
+
+ //! Truncates the vector to at most `n` items.
+ inline void truncate(size_type n) noexcept {
+ _size = Support::min(_size, n);
+ }
+
+ //! Sets size of the vector to `n`. Used internally by some algorithms.
+ inline void _setSize(size_type n) noexcept {
+ ASMJIT_ASSERT(n <= _capacity);
+ _size = n;
+ }
+
+ //! \}
+};
+
+//! \endcond
+
+// ============================================================================
+// [asmjit::ZoneVector<T>]
+// ============================================================================
+
+//! Template used to store and manage an array of Zone allocated data.
+//!
+//! This template has these advantages over `std::vector<>`:
+//! - Always non-copyable (designed to be non-copyable, we want it).
+//! - Optimized for working only with POD types.
+//! - Uses ZoneAllocator, thus small vectors are almost for free.
+//! - Explicit allocation, ZoneAllocator is not part of the data.
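+//!
+//! A minimal usage sketch (illustrative only, mirroring the unit test in
+//! `zonevector.cpp`):
+//!
+//! ```
+//! Zone zone(4096);
+//! ZoneAllocator allocator(&zone);
+//!
+//! ZoneVector<int> vec;
+//! if (vec.append(&allocator, 42) == kErrorOk) {
+//!   int value = vec[0]; // Vector data lives in the zone.
+//! }
+//! vec.release(&allocator);
+//! ```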
+template <typename T>
+class ZoneVector : public ZoneVectorBase {
+public:
+ ASMJIT_NONCOPYABLE(ZoneVector<T>)
+
+  // STL compatibility.
+ typedef T value_type;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+ typedef T& reference;
+ typedef const T& const_reference;
+
+ typedef Support::Iterator<T> iterator;
+ typedef Support::Iterator<const T> const_iterator;
+ typedef Support::ReverseIterator<T> reverse_iterator;
+ typedef Support::ReverseIterator<const T> const_reverse_iterator;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneVector() noexcept : ZoneVectorBase() {}
+  inline ZoneVector(ZoneVector&& other) noexcept : ZoneVectorBase(std::move(other)) {}
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns vector data.
+ inline T* data() noexcept { return static_cast<T*>(_data); }
+ //! Returns vector data (const)
+ inline const T* data() const noexcept { return static_cast<const T*>(_data); }
+
+ //! Returns item at the given index `i` (const).
+ inline const T& at(uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < _size);
+ return data()[i];
+ }
+
+ inline void _setEndPtr(T* p) noexcept {
+ ASMJIT_ASSERT(p >= data() && p <= data() + _capacity);
+ _setSize(uint32_t((uintptr_t)(p - data())));
+ }
+
+ //! \}
+
+ //! \name STL Compatibility (Iterators)
+ //! \{
+
+  inline iterator begin() noexcept { return iterator(data()); }
+  inline const_iterator begin() const noexcept { return const_iterator(data()); }
+
+  inline iterator end() noexcept { return iterator(data() + _size); }
+  inline const_iterator end() const noexcept { return const_iterator(data() + _size); }
+
+  inline reverse_iterator rbegin() noexcept { return reverse_iterator(data()); }
+  inline const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(data()); }
+
+  inline reverse_iterator rend() noexcept { return reverse_iterator(data() + _size); }
+  inline const_reverse_iterator rend() const noexcept { return const_reverse_iterator(data() + _size); }
+
+  inline const_iterator cbegin() const noexcept { return const_iterator(data()); }
+  inline const_iterator cend() const noexcept { return const_iterator(data() + _size); }
+
+  inline const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(data()); }
+  inline const_reverse_iterator crend() const noexcept { return const_reverse_iterator(data() + _size); }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ //! Swaps this vector with `other`.
+ inline void swap(ZoneVector<T>& other) noexcept { _swap(other); }
+
+ //! Prepends `item` to the vector.
+ inline Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
+ if (ASMJIT_UNLIKELY(_size == _capacity))
+ ASMJIT_PROPAGATE(grow(allocator, 1));
+
+ ::memmove(static_cast<T*>(_data) + 1, _data, size_t(_size) * sizeof(T));
+ memcpy(_data, &item, sizeof(T));
+
+ _size++;
+ return kErrorOk;
+ }
+
+ //! Inserts an `item` at the specified `index`.
+ inline Error insert(ZoneAllocator* allocator, uint32_t index, const T& item) noexcept {
+ ASMJIT_ASSERT(index <= _size);
+
+ if (ASMJIT_UNLIKELY(_size == _capacity))
+ ASMJIT_PROPAGATE(grow(allocator, 1));
+
+ T* dst = static_cast<T*>(_data) + index;
+ ::memmove(dst + 1, dst, size_t(_size - index) * sizeof(T));
+ memcpy(dst, &item, sizeof(T));
+ _size++;
+
+ return kErrorOk;
+ }
+
+ //! Appends `item` to the vector.
+ inline Error append(ZoneAllocator* allocator, const T& item) noexcept {
+ if (ASMJIT_UNLIKELY(_size == _capacity))
+ ASMJIT_PROPAGATE(grow(allocator, 1));
+
+ memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
+ _size++;
+
+ return kErrorOk;
+ }
+
+ inline Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
+ uint32_t size = other._size;
+ if (_capacity - _size < size)
+ ASMJIT_PROPAGATE(grow(allocator, size));
+
+ if (size) {
+ memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
+ _size += size;
+ }
+
+ return kErrorOk;
+ }
+
+ //! Prepends `item` to the vector (unsafe case).
+ //!
+ //! Can only be used together with `willGrow()`. If `willGrow(N)` returns
+ //! `kErrorOk` then N elements can be added to the vector without checking
+ //! if there is a place for them. Used mostly internally.
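+  //!
+  //! A sketch of the intended pattern (illustrative only):
+  //!
+  //! ```
+  //! ASMJIT_PROPAGATE(vec.willGrow(allocator, 2));
+  //! vec.prependUnsafe(a); // Guaranteed to fit now.
+  //! vec.appendUnsafe(b);
+  //! ```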
+ inline void prependUnsafe(const T& item) noexcept {
+ ASMJIT_ASSERT(_size < _capacity);
+ T* data = static_cast<T*>(_data);
+
+ if (_size)
+ ::memmove(data + 1, data, size_t(_size) * sizeof(T));
+
+ memcpy(data, &item, sizeof(T));
+ _size++;
+ }
+
+  //! Appends `item` to the vector (unsafe case).
+ //!
+ //! Can only be used together with `willGrow()`. If `willGrow(N)` returns
+ //! `kErrorOk` then N elements can be added to the vector without checking
+ //! if there is a place for them. Used mostly internally.
+ inline void appendUnsafe(const T& item) noexcept {
+ ASMJIT_ASSERT(_size < _capacity);
+
+ memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
+ _size++;
+ }
+
+ //! Concatenates all items of `other` at the end of the vector.
+ inline void concatUnsafe(const ZoneVector<T>& other) noexcept {
+ uint32_t size = other._size;
+ ASMJIT_ASSERT(_capacity - _size >= size);
+
+ if (size) {
+ memcpy(static_cast<T*>(_data) + _size, other._data, size_t(size) * sizeof(T));
+ _size += size;
+ }
+ }
+
+ //! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
+ inline uint32_t indexOf(const T& val) const noexcept {
+ const T* data = static_cast<const T*>(_data);
+ uint32_t size = _size;
+
+ for (uint32_t i = 0; i < size; i++)
+ if (data[i] == val)
+ return i;
+ return Globals::kNotFound;
+ }
+
+ //! Tests whether the vector contains `val`.
+ inline bool contains(const T& val) const noexcept {
+ return indexOf(val) != Globals::kNotFound;
+ }
+
+ //! Removes item at index `i`.
+ inline void removeAt(uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < _size);
+
+ T* data = static_cast<T*>(_data) + i;
+ uint32_t size = --_size - i;
+
+ if (size)
+ ::memmove(data, data + 1, size_t(size) * sizeof(T));
+ }
+
+ inline T pop() noexcept {
+ ASMJIT_ASSERT(_size > 0);
+
+ uint32_t index = --_size;
+ return data()[index];
+ }
+
+ template<typename CompareT = Support::Compare<Support::kSortAscending>>
+ inline void sort(const CompareT& cmp = CompareT()) noexcept {
+ Support::qSort<T, CompareT>(data(), size(), cmp);
+ }
+
+ //! Returns item at index `i`.
+ inline T& operator[](uint32_t i) noexcept {
+ ASMJIT_ASSERT(i < _size);
+ return data()[i];
+ }
+
+ //! Returns item at index `i`.
+ inline const T& operator[](uint32_t i) const noexcept {
+ ASMJIT_ASSERT(i < _size);
+ return data()[i];
+ }
+
+ inline T& first() noexcept { return operator[](0); }
+ inline const T& first() const noexcept { return operator[](0); }
+
+ inline T& last() noexcept { return operator[](_size - 1); }
+ inline const T& last() const noexcept { return operator[](_size - 1); }
+
+ //! \}
+
+ //! \name Memory Management
+ //! \{
+
+ //! Releases the memory held by `ZoneVector<T>` back to the `allocator`.
+ inline void release(ZoneAllocator* allocator) noexcept {
+ _release(allocator, sizeof(T));
+ }
+
+ //! Called to grow the buffer to fit at least `n` elements more.
+ inline Error grow(ZoneAllocator* allocator, uint32_t n) noexcept {
+ return ZoneVectorBase::_grow(allocator, sizeof(T), n);
+ }
+
+ //! Resizes the vector to hold `n` elements.
+ //!
+ //! If `n` is greater than the current size then the additional elements'
+ //! content will be initialized to zero. If `n` is less than the current
+ //! size then the vector will be truncated to exactly `n` elements.
+ inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
+ return ZoneVectorBase::_resize(allocator, sizeof(T), n);
+ }
+
+ //! Reallocates the internal array to fit at least `n` items.
+ inline Error reserve(ZoneAllocator* allocator, uint32_t n) noexcept {
+ return n > _capacity ? ZoneVectorBase::_reserve(allocator, sizeof(T), n) : Error(kErrorOk);
+ }
+
+ inline Error willGrow(ZoneAllocator* allocator, uint32_t n = 1) noexcept {
+ return _capacity - _size < n ? grow(allocator, n) : Error(kErrorOk);
+ }
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::ZoneBitVector]
+// ============================================================================
+
+class ZoneBitVector {
+public:
+ typedef Support::BitWord BitWord;
+ static constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
+
+ //! Bits.
+ BitWord* _data;
+ //! Size of the bit-vector (in bits).
+ uint32_t _size;
+ //! Capacity of the bit-vector (in bits).
+ uint32_t _capacity;
+
+ ASMJIT_NONCOPYABLE(ZoneBitVector)
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+ static inline uint32_t _wordsPerBits(uint32_t nBits) noexcept {
+ return ((nBits + kBitWordSizeInBits - 1) / kBitWordSizeInBits);
+ }
+
+ static inline void _zeroBits(BitWord* dst, uint32_t nBitWords) noexcept {
+ for (uint32_t i = 0; i < nBitWords; i++)
+ dst[i] = 0;
+ }
+
+ static inline void _fillBits(BitWord* dst, uint32_t nBitWords) noexcept {
+ for (uint32_t i = 0; i < nBitWords; i++)
+ dst[i] = ~BitWord(0);
+ }
+
+ static inline void _copyBits(BitWord* dst, const BitWord* src, uint32_t nBitWords) noexcept {
+ for (uint32_t i = 0; i < nBitWords; i++)
+ dst[i] = src[i];
+ }
+
+ //! \}
+ //! \endcond
+
+ //! \name Construction & Destruction
+ //! \{
+
+ inline ZoneBitVector() noexcept
+ : _data(nullptr),
+ _size(0),
+ _capacity(0) {}
+
+ inline ZoneBitVector(ZoneBitVector&& other) noexcept
+ : _data(other._data),
+ _size(other._size),
+ _capacity(other._capacity) {}
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline bool operator==(const ZoneBitVector& other) const noexcept { return eq(other); }
+ inline bool operator!=(const ZoneBitVector& other) const noexcept { return !eq(other); }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Tests whether the bit-vector is empty (has no bits).
+ inline bool empty() const noexcept { return _size == 0; }
+ //! Returns the size of this bit-vector (in bits).
+ inline uint32_t size() const noexcept { return _size; }
+ //! Returns the capacity of this bit-vector (in bits).
+ inline uint32_t capacity() const noexcept { return _capacity; }
+
+ //! Returns the size of the `BitWord[]` array in `BitWord` units.
+ inline uint32_t sizeInBitWords() const noexcept { return _wordsPerBits(_size); }
+ //! Returns the capacity of the `BitWord[]` array in `BitWord` units.
+ inline uint32_t capacityInBitWords() const noexcept { return _wordsPerBits(_capacity); }
+
+  //! Returns bit-vector data as `BitWord[]`.
+ inline BitWord* data() noexcept { return _data; }
+ //! \overload
+ inline const BitWord* data() const noexcept { return _data; }
+
+ //! \}
+
+ //! \name Utilities
+ //! \{
+
+ inline void swap(ZoneBitVector& other) noexcept {
+ std::swap(_data, other._data);
+ std::swap(_size, other._size);
+ std::swap(_capacity, other._capacity);
+ }
+
+ inline void clear() noexcept {
+ _size = 0;
+ }
+
+ inline void reset() noexcept {
+ _data = nullptr;
+ _size = 0;
+ _capacity = 0;
+ }
+
+ inline void truncate(uint32_t newSize) noexcept {
+ _size = Support::min(_size, newSize);
+ _clearUnusedBits();
+ }
+
+ inline bool bitAt(uint32_t index) const noexcept {
+ ASMJIT_ASSERT(index < _size);
+ return Support::bitVectorGetBit(_data, index);
+ }
+
+ inline void setBit(uint32_t index, bool value) noexcept {
+ ASMJIT_ASSERT(index < _size);
+ Support::bitVectorSetBit(_data, index, value);
+ }
+
+ inline void flipBit(uint32_t index) noexcept {
+ ASMJIT_ASSERT(index < _size);
+ Support::bitVectorFlipBit(_data, index);
+ }
+
+ ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
+ uint32_t index = _size;
+ if (ASMJIT_UNLIKELY(index >= _capacity))
+ return _append(allocator, value);
+
+ uint32_t idx = index / kBitWordSizeInBits;
+ uint32_t bit = index % kBitWordSizeInBits;
+
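+    // A bit-word past the current size may contain garbage (capacity exceeds
+    // size), so assign its first bit instead of OR-ing into it.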
+ if (bit == 0)
+ _data[idx] = BitWord(value) << bit;
+ else
+ _data[idx] |= BitWord(value) << bit;
+
+ _size++;
+ return kErrorOk;
+ }
+
+ ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
+
+ inline void clearAll() noexcept {
+ _zeroBits(_data, _wordsPerBits(_size));
+ }
+
+ inline void fillAll() noexcept {
+ _fillBits(_data, _wordsPerBits(_size));
+ _clearUnusedBits();
+ }
+
+ inline void clearBits(uint32_t start, uint32_t count) noexcept {
+ ASMJIT_ASSERT(start <= _size);
+ ASMJIT_ASSERT(_size - start >= count);
+
+ Support::bitVectorClear(_data, start, count);
+ }
+
+ inline void fillBits(uint32_t start, uint32_t count) noexcept {
+ ASMJIT_ASSERT(start <= _size);
+ ASMJIT_ASSERT(_size - start >= count);
+
+ Support::bitVectorFill(_data, start, count);
+ }
+
+ //! Performs a logical bitwise AND between bits specified in this array and bits
+  //! in `other`. If `other` has fewer bits than `this` then all remaining bits are
+ //! set to zero.
+ //!
+ //! \note The size of the BitVector is unaffected by this operation.
+ inline void and_(const ZoneBitVector& other) noexcept {
+ BitWord* dst = _data;
+ const BitWord* src = other._data;
+
+ uint32_t thisBitWordCount = sizeInBitWords();
+ uint32_t otherBitWordCount = other.sizeInBitWords();
+ uint32_t commonBitWordCount = Support::min(thisBitWordCount, otherBitWordCount);
+
+ uint32_t i = 0;
+ while (i < commonBitWordCount) {
+ dst[i] = dst[i] & src[i];
+ i++;
+ }
+
+ while (i < thisBitWordCount) {
+ dst[i] = 0;
+ i++;
+ }
+ }
+
+ //! Performs a logical bitwise AND between bits specified in this array and
+  //! negated bits in `other`. If `other` has fewer bits than `this` then all
+ //! remaining bits are kept intact.
+ //!
+ //! \note The size of the BitVector is unaffected by this operation.
+ inline void andNot(const ZoneBitVector& other) noexcept {
+ BitWord* dst = _data;
+ const BitWord* src = other._data;
+
+ uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
+ for (uint32_t i = 0; i < commonBitWordCount; i++)
+ dst[i] = dst[i] & ~src[i];
+ }
+
+  //! Performs a logical bitwise OR between bits specified in this array and bits
+  //! in `other`. If `other` has fewer bits than `this` then all remaining bits
+ //! are kept intact.
+ //!
+ //! \note The size of the BitVector is unaffected by this operation.
+ inline void or_(const ZoneBitVector& other) noexcept {
+ BitWord* dst = _data;
+ const BitWord* src = other._data;
+
+ uint32_t commonBitWordCount = _wordsPerBits(Support::min(_size, other._size));
+ for (uint32_t i = 0; i < commonBitWordCount; i++)
+ dst[i] = dst[i] | src[i];
+ _clearUnusedBits();
+ }
+
+ inline void _clearUnusedBits() noexcept {
+ uint32_t idx = _size / kBitWordSizeInBits;
+ uint32_t bit = _size % kBitWordSizeInBits;
+
+ if (!bit) return;
+ _data[idx] &= (BitWord(1) << bit) - 1u;
+ }
+
+ inline bool eq(const ZoneBitVector& other) const noexcept {
+ if (_size != other._size)
+ return false;
+
+ const BitWord* aData = _data;
+ const BitWord* bData = other._data;
+ uint32_t numBitWords = _wordsPerBits(_size);
+
+ for (uint32_t i = 0; i < numBitWords; i++)
+ if (aData[i] != bData[i])
+ return false;
+ return true;
+ }
+
+ //! \}
+
+ //! \name Memory Management
+ //! \{
+
+ inline void release(ZoneAllocator* allocator) noexcept {
+ if (!_data) return;
+ allocator->release(_data, _capacity / 8);
+ reset();
+ }
+
+ inline Error resize(ZoneAllocator* allocator, uint32_t newSize, bool newBitsValue = false) noexcept {
+ return _resize(allocator, newSize, newSize, newBitsValue);
+ }
+
+ ASMJIT_API Error _resize(ZoneAllocator* allocator, uint32_t newSize, uint32_t idealCapacity, bool newBitsValue) noexcept;
+ ASMJIT_API Error _append(ZoneAllocator* allocator, bool value) noexcept;
+
+ //! \}
+
+ //! \name Iterators
+ //! \{
+
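+  //! Iterates over all set bits of a bit-vector. A usage sketch (illustrative
+  //! only, assuming `Support::BitVectorIterator`'s `hasNext()` / `next()`
+  //! interface):
+  //!
+  //! ```
+  //! ZoneBitVector::ForEachBitSet it(vec);
+  //! while (it.hasNext()) {
+  //!   size_t bitIndex = size_t(it.next());
+  //!   // Process `bitIndex`...
+  //! }
+  //! ```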
+ class ForEachBitSet : public Support::BitVectorIterator<BitWord> {
+ public:
+ ASMJIT_INLINE explicit ForEachBitSet(const ZoneBitVector& bitVector) noexcept
+ : Support::BitVectorIterator<BitWord>(bitVector.data(), bitVector.sizeInBitWords()) {}
+ };
+
+ template<class Operator>
+ class ForEachBitOp : public Support::BitVectorOpIterator<BitWord, Operator> {
+ public:
+ ASMJIT_INLINE ForEachBitOp(const ZoneBitVector& a, const ZoneBitVector& b) noexcept
+ : Support::BitVectorOpIterator<BitWord, Operator>(a.data(), b.data(), a.sizeInBitWords()) {
+ ASMJIT_ASSERT(a.size() == b.size());
+ }
+ };
+
+ //! \}
+
+};
+
+//! \}
+
+ASMJIT_END_NAMESPACE
+
+#endif // ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86.h b/3rdparty/asmjit/src/asmjit/x86.h
new file mode 100644
index 00000000000..161b3be19fb
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86.h
@@ -0,0 +1,42 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_H_INCLUDED
+#define ASMJIT_X86_H_INCLUDED
+
+//! \defgroup asmjit_x86 X86
+//!
+//! \brief X86/X64 Backend.
+
+#include "./core.h"
+
+#include "./x86/x86assembler.h"
+#include "./x86/x86builder.h"
+#include "./x86/x86compiler.h"
+#include "./x86/x86emitter.h"
+#include "./x86/x86features.h"
+#include "./x86/x86globals.h"
+#include "./x86/x86instdb.h"
+#include "./x86/x86operand.h"
+
+#endif // ASMJIT_X86_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp b/3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp
new file mode 100644
index 00000000000..d509926e5f5
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86assembler.cpp
@@ -0,0 +1,4747 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/codebufferwriter_p.h"
+#include "../core/cpuinfo.h"
+#include "../core/logging.h"
+#include "../core/misc_p.h"
+#include "../core/support.h"
+#include "../x86/x86assembler.h"
+#include "../x86/x86instdb_p.h"
+#include "../x86/x86logging_p.h"
+#include "../x86/x86opcode_p.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [TypeDefs]
+// ============================================================================
+
+typedef Support::FastUInt8 FastUInt8;
+
+// ============================================================================
+// [Constants]
+// ============================================================================
+
+//! X86 bytes used to encode important prefixes.
+enum X86Byte : uint32_t {
+ //! 1-byte REX prefix mask.
+ kX86ByteRex = 0x40,
+
+ //! 1-byte REX.W component.
+ kX86ByteRexW = 0x08,
+
+ kX86ByteInvalidRex = 0x80,
+
+ //! 2-byte VEX prefix:
+ //! - `[0]` - `0xC5`.
+ //! - `[1]` - `RvvvvLpp`.
+ kX86ByteVex2 = 0xC5,
+
+ //! 3-byte VEX prefix:
+ //! - `[0]` - `0xC4`.
+ //! - `[1]` - `RXBmmmmm`.
+ //! - `[2]` - `WvvvvLpp`.
+ kX86ByteVex3 = 0xC4,
+
+ //! 3-byte XOP prefix:
+ //! - `[0]` - `0x8F`.
+ //! - `[1]` - `RXBmmmmm`.
+ //! - `[2]` - `WvvvvLpp`.
+ kX86ByteXop3 = 0x8F,
+
+ //! 4-byte EVEX prefix:
+ //! - `[0]` - `0x62`.
+ //! - `[1]` - Payload0 or `P[ 7: 0]` - `[R X B R' 0 0 m m]`.
+ //! - `[2]` - Payload1 or `P[15: 8]` - `[W v v v v 1 p p]`.
+ //! - `[3]` - Payload2 or `P[23:16]` - `[z L' L b V' a a a]`.
+ //!
+ //! Payload:
+ //! - `P[ 1: 0]` - OPCODE: EVEX.mmmmm, only lowest 2 bits [1:0] used.
+ //! - `P[ 3: 2]` - ______: Must be 0.
+ //! - `P[ 4]` - REG-ID: EVEX.R' - 5th bit of 'RRRRR'.
+ //! - `P[ 5]` - REG-ID: EVEX.B - 4th bit of 'BBBBB'.
+ //! - `P[ 6]` - REG-ID: EVEX.X - 5th bit of 'BBBBB' or 4th bit of 'XXXX' (with SIB).
+ //! - `P[ 7]` - REG-ID: EVEX.R - 4th bit of 'RRRRR'.
+ //! - `P[ 9: 8]` - OPCODE: EVEX.pp.
+ //! - `P[ 10]` - ______: Must be 1.
+ //! - `P[14:11]` - REG-ID: 4 bits of 'VVVV'.
+ //! - `P[ 15]` - OPCODE: EVEX.W.
+ //! - `P[18:16]` - REG-ID: K register k0...k7 (Merging/Zeroing Vector Ops).
+ //! - `P[ 19]` - REG-ID: 5th bit of 'VVVVV'.
+ //! - `P[ 20]` - OPCODE: Broadcast/Rounding Control/SAE bit.
+  //! - `P[22:21]` - OPCODE: Vector Length (L' and L) / Rounding Control.
+ //! - `P[ 23]` - OPCODE: Zeroing/Merging.
+ kX86ByteEvex = 0x62
+};
+
+// AsmJit specific (used to encode VVVVV field in XOP/VEX/EVEX).
+enum VexVVVVV : uint32_t {
+ kVexVVVVVShift = 7,
+ kVexVVVVVMask = 0x1F << kVexVVVVVShift
+};
+
+//! Instruction 2-byte/3-byte opcode prefix definition.
+struct X86OpcodeMM {
+ uint8_t size;
+ uint8_t data[3];
+};
+
+//! Mandatory prefixes used to encode legacy [66, F3, F2] or [9B] byte.
+static const uint8_t x86OpcodePP[8] = { 0x00, 0x66, 0xF3, 0xF2, 0x00, 0x00, 0x00, 0x9B };
+
+//! Instruction 2-byte/3-byte opcode prefix data.
+static const X86OpcodeMM x86OpcodeMM[] = {
+ { 0, { 0x00, 0x00, 0 } }, // #00 (0b0000).
+ { 1, { 0x0F, 0x00, 0 } }, // #01 (0b0001).
+ { 2, { 0x0F, 0x38, 0 } }, // #02 (0b0010).
+ { 2, { 0x0F, 0x3A, 0 } }, // #03 (0b0011).
+ { 2, { 0x0F, 0x01, 0 } }, // #04 (0b0100).
+ { 0, { 0x00, 0x00, 0 } }, // #05 (0b0101).
+ { 0, { 0x00, 0x00, 0 } }, // #06 (0b0110).
+ { 0, { 0x00, 0x00, 0 } }, // #07 (0b0111).
+ { 0, { 0x00, 0x00, 0 } }, // #08 (0b1000).
+ { 0, { 0x00, 0x00, 0 } }, // #09 (0b1001).
+ { 0, { 0x00, 0x00, 0 } }, // #0A (0b1010).
+ { 0, { 0x00, 0x00, 0 } }, // #0B (0b1011).
+ { 0, { 0x00, 0x00, 0 } }, // #0C (0b1100).
+ { 0, { 0x00, 0x00, 0 } }, // #0D (0b1101).
+ { 0, { 0x00, 0x00, 0 } }, // #0E (0b1110).
+ { 0, { 0x00, 0x00, 0 } } // #0F (0b1111).
+};
+
+static const uint8_t x86SegmentPrefix[8] = {
+ 0x00, // None.
+ 0x26, // ES.
+ 0x2E, // CS.
+ 0x36, // SS.
+ 0x3E, // DS.
+ 0x64, // FS.
+ 0x65 // GS.
+};
+
+static const uint32_t x86OpcodePushSReg[8] = {
+ Opcode::k000000 | 0x00, // None.
+ Opcode::k000000 | 0x06, // Push ES.
+ Opcode::k000000 | 0x0E, // Push CS.
+ Opcode::k000000 | 0x16, // Push SS.
+ Opcode::k000000 | 0x1E, // Push DS.
+ Opcode::k000F00 | 0xA0, // Push FS.
+ Opcode::k000F00 | 0xA8 // Push GS.
+};
+
+static const uint32_t x86OpcodePopSReg[8] = {
+ Opcode::k000000 | 0x00, // None.
+ Opcode::k000000 | 0x07, // Pop ES.
+ Opcode::k000000 | 0x00, // Pop CS.
+ Opcode::k000000 | 0x17, // Pop SS.
+ Opcode::k000000 | 0x1F, // Pop DS.
+ Opcode::k000F00 | 0xA1, // Pop FS.
+ Opcode::k000F00 | 0xA9 // Pop GS.
+};
+
+// ============================================================================
+// [asmjit::X86MemInfo | X86VEXPrefix | X86LLByRegType | X86CDisp8Table]
+// ============================================================================
+
+//! Memory operand's info bits.
+//!
+//! A lookup table that contains various information based on the BASE and
+//! INDEX information of a memory operand. This is much better and safer than
+//! scattering IFs through the code, and it can check for errors much faster.
+enum X86MemInfo_Enum {
+ kX86MemInfo_0 = 0x00,
+
+ kX86MemInfo_BaseGp = 0x01, //!< Has BASE reg, REX.B can be 1, compatible with REX.B byte.
+ kX86MemInfo_Index = 0x02, //!< Has INDEX reg, REX.X can be 1, compatible with REX.X byte.
+
+ kX86MemInfo_BaseLabel = 0x10, //!< Base is Label.
+ kX86MemInfo_BaseRip = 0x20, //!< Base is RIP.
+
+ kX86MemInfo_67H_X86 = 0x40, //!< Address-size override in 32-bit mode.
+ kX86MemInfo_67H_X64 = 0x80, //!< Address-size override in 64-bit mode.
+ kX86MemInfo_67H_Mask = 0xC0 //!< Contains all address-size override bits.
+};
+
+template<uint32_t X>
+struct X86MemInfo_T {
+ enum {
+ B = (X ) & 0x1F,
+ I = (X >> 5) & 0x1F,
+
+ kBase = (B >= Reg::kTypeGpw && B <= Reg::kTypeGpq ) ? kX86MemInfo_BaseGp :
+ (B == Reg::kTypeRip ) ? kX86MemInfo_BaseRip :
+ (B == Label::kLabelTag ) ? kX86MemInfo_BaseLabel : 0,
+
+ kIndex = (I >= Reg::kTypeGpw && I <= Reg::kTypeGpq ) ? kX86MemInfo_Index :
+ (I >= Reg::kTypeXmm && I <= Reg::kTypeZmm ) ? kX86MemInfo_Index : 0,
+
+ k67H = (B == Reg::kTypeGpw && I == Reg::kTypeNone) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeGpd && I == Reg::kTypeNone) ? kX86MemInfo_67H_X64 :
+ (B == Reg::kTypeNone && I == Reg::kTypeGpw ) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeNone && I == Reg::kTypeGpd ) ? kX86MemInfo_67H_X64 :
+ (B == Reg::kTypeGpw && I == Reg::kTypeGpw ) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeGpd && I == Reg::kTypeGpd ) ? kX86MemInfo_67H_X64 :
+ (B == Reg::kTypeGpw && I == Reg::kTypeXmm ) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeGpd && I == Reg::kTypeXmm ) ? kX86MemInfo_67H_X64 :
+ (B == Reg::kTypeGpw && I == Reg::kTypeYmm ) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeGpd && I == Reg::kTypeYmm ) ? kX86MemInfo_67H_X64 :
+ (B == Reg::kTypeGpw && I == Reg::kTypeZmm ) ? kX86MemInfo_67H_X86 :
+ (B == Reg::kTypeGpd && I == Reg::kTypeZmm ) ? kX86MemInfo_67H_X64 :
+ (B == Label::kLabelTag && I == Reg::kTypeGpw ) ? kX86MemInfo_67H_X86 :
+ (B == Label::kLabelTag && I == Reg::kTypeGpd ) ? kX86MemInfo_67H_X64 : 0,
+
+ kValue = kBase | kIndex | k67H | 0x04 | 0x08
+ };
+};
+
+// The result stored in the LUT is a combination of
+// - 67H - Address override prefix - depends on BASE+INDEX register types and
+// the target architecture.
+// - REX - A possible combination of REX.[B|X|R|W] bits in REX prefix where
+// REX.B and REX.X are possibly masked out, but REX.R and REX.W are
+// kept as is.
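+// The table is indexed by `(indexType << 5) | baseType` (see X86MemInfo_T),
+// covering all 1024 BASE/INDEX register-type combinations.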
+#define VALUE(X) X86MemInfo_T<X>::kValue
+static const uint8_t x86MemInfo[] = { ASMJIT_LOOKUP_TABLE_1024(VALUE, 0) };
+#undef VALUE
+
+// VEX3 or XOP xor bits applied to the opcode before it's emitted. The index
+// to this table is the 'mmmmm' value, which contains all we need. This is
+// only used by 3-byte VEX and XOP prefixes; the 2-byte VEX prefix is handled
+// differently. The idea is to minimize the difference between VEX3 and XOP
+// when encoding a VEX or XOP instruction. This minimizes the code required
+// to emit such instructions and also makes it faster as no branch is needed
+// to decide between VEX3 and XOP.
+// ____ ___
+// [_OPCODE_|WvvvvLpp|RXBmmmmm|VEX3_XOP]
+#define VALUE(X) ((X & 0x08) ? kX86ByteXop3 : kX86ByteVex3) | (0xF << 19) | (0x7 << 13)
+static const uint32_t x86VEXPrefix[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) };
+#undef VALUE
+
+// Table that contains LL opcode field addressed by a register size / 16. It's
+// used to propagate L.256 or L.512 when YMM or ZMM registers are used,
+// respectively.
+#define VALUE(X) (X & (64 >> 4)) ? Opcode::kLL_2 : \
+ (X & (32 >> 4)) ? Opcode::kLL_1 : Opcode::kLL_0
+static const uint32_t x86LLBySizeDiv16[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) };
+#undef VALUE
+
+// Table that contains LL opcode field addressed by a register type. It's
+// used to propagate L.256 or L.512 when YMM or ZMM registers are used,
+// respectively.
+#define VALUE(X) X == Reg::kTypeZmm ? Opcode::kLL_2 : \
+ X == Reg::kTypeYmm ? Opcode::kLL_1 : Opcode::kLL_0
+static const uint32_t x86LLByRegType[] = { ASMJIT_LOOKUP_TABLE_16(VALUE, 0) };
+#undef VALUE
+
+// Table that contains a scale (shift left) based on 'TTWLL' field and
+// the instruction's tuple-type (TT) field. The scale is then applied to
+// the BASE-N stored in each opcode to calculate the final compressed
+// displacement used by all EVEX encoded instructions.
+template<uint32_t X>
+struct X86CDisp8SHL_T {
+ enum {
+ TT = (X >> 3) << Opcode::kCDTT_Shift,
+ LL = (X >> 0) & 0x3,
+ W = (X >> 2) & 0x1,
+
+ kValue = (TT == Opcode::kCDTT_None ? ((LL==0) ? 0 : (LL==1) ? 0 : 0 ) :
+ TT == Opcode::kCDTT_ByLL ? ((LL==0) ? 0 : (LL==1) ? 1 : 2 ) :
+ TT == Opcode::kCDTT_T1W ? ((LL==0) ? W : (LL==1) ? 1+W : 2+W) :
+ TT == Opcode::kCDTT_DUP ? ((LL==0) ? 0 : (LL==1) ? 2 : 3 ) : 0) << Opcode::kCDSHL_Shift
+ };
+};
+
+#define VALUE(X) X86CDisp8SHL_T<X>::kValue
+static const uint32_t x86CDisp8SHL[] = { ASMJIT_LOOKUP_TABLE_32(VALUE, 0) };
+#undef VALUE
+
+// Table that contains the R/M field of the ModRM byte for a 16-bit
+// [BASE + disp] address. 0xFF == Invalid.
+static const uint8_t x86Mod16BaseTable[8] = {
+ 0xFF, // AX -> N/A.
+ 0xFF, // CX -> N/A.
+ 0xFF, // DX -> N/A.
+ 0x07, // BX -> 111.
+ 0xFF, // SP -> N/A.
+ 0x06, // BP -> 110.
+ 0x04, // SI -> 100.
+ 0x05 // DI -> 101.
+};
+
+// Table that contains the R/M field of the ModRM byte for a 16-bit
+// [BASE + INDEX + disp] combination. 0xFF == Invalid.
+template<uint32_t X>
+struct X86Mod16BaseIndexTable_T {
+ enum {
+ B = X >> 3,
+ I = X & 0x7,
+
+ kValue = ((B == Gp::kIdBx && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBx)) ? 0x00 :
+ ((B == Gp::kIdBx && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBx)) ? 0x01 :
+ ((B == Gp::kIdBp && I == Gp::kIdSi) || (B == Gp::kIdSi && I == Gp::kIdBp)) ? 0x02 :
+ ((B == Gp::kIdBp && I == Gp::kIdDi) || (B == Gp::kIdDi && I == Gp::kIdBp)) ? 0x03 : 0xFF
+ };
+};
+
+#define VALUE(X) X86Mod16BaseIndexTable_T<X>::kValue
+static const uint8_t x86Mod16BaseIndexTable[] = { ASMJIT_LOOKUP_TABLE_64(VALUE, 0) };
+#undef VALUE
+
+// ============================================================================
+// [asmjit::x86::Assembler - Helpers]
+// ============================================================================
+
+static ASMJIT_INLINE bool x86IsJmpOrCall(uint32_t instId) noexcept {
+ return instId == Inst::kIdJmp || instId == Inst::kIdCall;
+}
+
+static ASMJIT_INLINE bool x86IsImplicitMem(const Operand_& op, uint32_t base) noexcept {
+ return op.isMem() && op.as<Mem>().baseId() == base && !op.as<Mem>().hasOffset();
+}
+
+//! Combine `regId` and `vvvvvId` into a single value (used by AVX and AVX-512).
+static ASMJIT_INLINE uint32_t x86PackRegAndVvvvv(uint32_t regId, uint32_t vvvvvId) noexcept {
+ return regId + (vvvvvId << kVexVVVVVShift);
+}
+
+static ASMJIT_INLINE uint32_t x86OpcodeLByVMem(const Operand_& op) noexcept {
+ return x86LLByRegType[op.as<Mem>().indexType()];
+}
+
+static ASMJIT_INLINE uint32_t x86OpcodeLBySize(uint32_t size) noexcept {
+ return x86LLBySizeDiv16[size / 16];
+}
+
+//! Encode MOD byte.
+static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcept {
+ ASMJIT_ASSERT(m <= 3);
+ ASMJIT_ASSERT(o <= 7);
+ ASMJIT_ASSERT(rm <= 7);
+ return (m << 6) + (o << 3) + rm;
+}
+
+//! Encode SIB byte.
+static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) noexcept {
+ ASMJIT_ASSERT(s <= 3);
+ ASMJIT_ASSERT(i <= 7);
+ ASMJIT_ASSERT(b <= 7);
+ return (s << 6) + (i << 3) + b;
+}
+
+static ASMJIT_INLINE bool x86IsRexInvalid(uint32_t rex) noexcept {
+ // Validates the following possibilities:
+ // REX == 0x00 -> OKAY (X86_32 / X86_64).
+ // REX == 0x40-0x4F -> OKAY (X86_64).
+ // REX == 0x80 -> OKAY (X86_32 mode, rex prefix not used).
+ // REX == 0x81-0xCF -> BAD (X86_32 mode, rex prefix used).
+ return rex > kX86ByteInvalidRex;
+}
+
+template<typename T>
+static constexpr T x86SignExtendI32(T imm) noexcept { return T(int64_t(int32_t(imm & T(0xFFFFFFFF)))); }
+
+static ASMJIT_INLINE uint32_t x86AltOpcodeOf(const InstDB::InstInfo* info) noexcept {
+ return InstDB::_altOpcodeTable[info->_altOpcodeIndex];
+}
+
+// ============================================================================
+// [asmjit::X86BufferWriter]
+// ============================================================================
+
+class X86BufferWriter : public CodeBufferWriter {
+public:
+ ASMJIT_INLINE explicit X86BufferWriter(Assembler* a) noexcept
+ : CodeBufferWriter(a) {}
+
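+  // Emits the mandatory prefix byte (66, F2, F3, or the FPU's 9B) if the
+  // opcode's PP field is non-zero; kPP_FPUMask is used instead of a plain
+  // PP mask so the FPU-only 9B index (7) stays reachable.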
+ ASMJIT_INLINE void emitPP(uint32_t opcode) noexcept {
+ uint32_t ppIndex = (opcode >> Opcode::kPP_Shift) &
+ (Opcode::kPP_FPUMask >> Opcode::kPP_Shift) ;
+ emit8If(x86OpcodePP[ppIndex], ppIndex != 0);
+ }
+
+ ASMJIT_INLINE void emitMMAndOpcode(uint32_t opcode) noexcept {
+ uint32_t mmIndex = (opcode & Opcode::kMM_Mask) >> Opcode::kMM_Shift;
+ const X86OpcodeMM& mmCode = x86OpcodeMM[mmIndex];
+
+ emit8If(mmCode.data[0], mmCode.size > 0);
+ emit8If(mmCode.data[1], mmCode.size > 1);
+ emit8(opcode);
+ }
+
+ ASMJIT_INLINE void emitSegmentOverride(uint32_t segmentId) noexcept {
+ ASMJIT_ASSERT(segmentId < ASMJIT_ARRAY_SIZE(x86SegmentPrefix));
+
+ FastUInt8 prefix = x86SegmentPrefix[segmentId];
+ emit8If(prefix, prefix != 0);
+ }
+
+ template<typename CondT>
+ ASMJIT_INLINE void emitAddressOverride(CondT condition) noexcept {
+ emit8If(0x67, condition);
+ }
+
+ ASMJIT_INLINE void emitImmByteOrDWord(uint64_t immValue, FastUInt8 immSize) noexcept {
+ if (!immSize)
+ return;
+
+ ASMJIT_ASSERT(immSize == 1 || immSize == 4);
+
+#if ASMJIT_ARCH_BITS >= 64
+ uint64_t imm = uint64_t(immValue);
+#else
+ uint32_t imm = uint32_t(immValue & 0xFFFFFFFFu);
+#endif
+
+ // Many instructions just use a single byte immediate, so make it fast.
+ emit8(imm & 0xFFu);
+ if (immSize == 1) return;
+
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ }
+
+ ASMJIT_INLINE void emitImmediate(uint64_t immValue, FastUInt8 immSize) noexcept {
+ if (!immSize)
+ return;
+
+#if ASMJIT_ARCH_BITS >= 64
+ uint64_t imm = uint64_t(immValue);
+#else
+ uint32_t imm = uint32_t(immValue & 0xFFFFFFFFu);
+#endif
+
+ // Many instructions just use a single byte immediate, so make it fast.
+ emit8(imm & 0xFFu);
+ if (--immSize == 0) return;
+
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ if (--immSize == 0) return;
+
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ if (--immSize == 0) return;
+
+ imm >>= 8;
+ emit8(imm & 0xFFu);
+ if (--immSize == 0) return;
+
+    // Can be 1, 2, 4 or 8 bytes; this handles the remaining high DWORD of an 8-byte immediate.
+ ASMJIT_ASSERT(immSize == 4);
+
+#if ASMJIT_ARCH_BITS >= 64
+ imm >>= 8;
+ emit32uLE(uint32_t(imm));
+#else
+ emit32uLE(uint32_t((uint64_t(immValue) >> 32) & 0xFFFFFFFFu));
+#endif
+ }
+};
+
+// If the operand is BPL|SPL|SIL|DIL|R8B-R15B:
+//   - Force REX prefix.
+// If the operand is AH|BH|CH|DH:
+//   - Patch its index from 0..3 to 4..7 as encoded by X86.
+//   - Disallow REX prefix.
+#define FIXUP_GPB(REG_OP, REG_ID) \
+ do { \
+ if (!static_cast<const Gp&>(REG_OP).isGpbHi()) { \
+ options |= (REG_ID >= 4) ? uint32_t(Inst::kOptionRex) \
+ : uint32_t(0); \
+ } \
+ else { \
+ options |= Inst::_kOptionInvalidRex; \
+ REG_ID += 4; \
+ } \
+ } while (0)
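+// For example, the high-byte registers (AH|CH|DH|BH) are stored with ids
+// 0..3 and the GpbHi register type; the fixup above remaps them to the
+// hardware ids 4..7 and forbids REX for the whole instruction.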
+
+#define ENC_OPS1(OP0) ((Operand::kOp##OP0))
+#define ENC_OPS2(OP0, OP1) ((Operand::kOp##OP0) + ((Operand::kOp##OP1) << 3))
+#define ENC_OPS3(OP0, OP1, OP2) ((Operand::kOp##OP0) + ((Operand::kOp##OP1) << 3) + ((Operand::kOp##OP2) << 6))
+#define ENC_OPS4(OP0, OP1, OP2, OP3) ((Operand::kOp##OP0) + ((Operand::kOp##OP1) << 3) + ((Operand::kOp##OP2) << 6) + ((Operand::kOp##OP3) << 9))
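+// Each operand type occupies 3 bits; ENC_OPS1..ENC_OPS3 produce values that
+// can be compared directly against `isign3`, the combined signature of the
+// first three operands computed in `Assembler::_emit()`.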
+
+// ============================================================================
+// [asmjit::x86::Assembler - Movabs Heuristics]
+// ============================================================================
+
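+// Computes the encoded size of the 64-bit 'movabs' form (prefixes + opcode
+// + 8-byte immediate) so the reachability check below can use the exact
+// end-of-instruction address.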
+static ASMJIT_INLINE uint32_t x86GetMovAbsInstSize64Bit(uint32_t regSize, uint32_t options, const Mem& rmRel) noexcept {
+ uint32_t segmentPrefixSize = rmRel.segmentId() != 0;
+ uint32_t _66hPrefixSize = regSize == 2;
+ uint32_t rexPrefixSize = (regSize == 8) || ((options & Inst::kOptionRex) != 0);
+ uint32_t opCodeByteSize = 1;
+ uint32_t immediateSize = 8;
+
+ return segmentPrefixSize + _66hPrefixSize + rexPrefixSize + opCodeByteSize + immediateSize;
+}
+
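+// Resolves the address type of a 'mov ?ax, [moffs]' candidate - the default
+// address type becomes absolute in 32-bit mode, and in 64-bit mode whenever
+// the target cannot be reached or encoded with a 32-bit displacement.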
+static ASMJIT_INLINE uint32_t x86GetMovAbsAddrType(Assembler* self, X86BufferWriter& writer, uint32_t regSize, uint32_t options, const Mem& rmRel) noexcept {
+ uint32_t addrType = rmRel.addrType();
+ int64_t addrValue = rmRel.offset();
+
+ if (addrType == BaseMem::kAddrTypeDefault && !(options & Inst::kOptionModMR)) {
+ if (self->is64Bit()) {
+ uint64_t baseAddress = self->codeInfo().baseAddress();
+ if (baseAddress != Globals::kNoBaseAddress && !rmRel.hasSegment()) {
+ uint32_t instructionSize = x86GetMovAbsInstSize64Bit(regSize, options, rmRel);
+ uint64_t virtualOffset = uint64_t(writer.offsetFrom(self->_bufferData));
+ uint64_t rip64 = baseAddress + self->_section->offset() + virtualOffset + instructionSize;
+ uint64_t rel64 = uint64_t(addrValue) - rip64;
+
+ if (!Support::isInt32(int64_t(rel64)))
+ addrType = BaseMem::kAddrTypeAbs;
+ }
+ else {
+ if (!Support::isInt32(addrValue))
+ addrType = BaseMem::kAddrTypeAbs;
+ }
+ }
+ else {
+ addrType = BaseMem::kAddrTypeAbs;
+ }
+ }
+
+ return addrType;
+}
+
+// ============================================================================
+// [asmjit::x86::Assembler - Construction / Destruction]
+// ============================================================================
+
+Assembler::Assembler(CodeHolder* code) noexcept : BaseAssembler() {
+ if (code)
+ code->attach(this);
+}
+Assembler::~Assembler() noexcept {}
+
+// ============================================================================
+// [asmjit::x86::Assembler - Emit (Low-Level)]
+// ============================================================================
+
+ASMJIT_FAVOR_SPEED Error Assembler::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
+ constexpr uint32_t kVSHR_W = Opcode::kW_Shift - 23;
+ constexpr uint32_t kVSHR_PP = Opcode::kPP_Shift - 16;
+ constexpr uint32_t kVSHR_PP_EW = Opcode::kPP_Shift - 16;
+
+ constexpr uint32_t kRequiresSpecialHandling =
+ Inst::kOptionReserved | // Logging/Validation/Error.
+ Inst::kOptionRep | // REP/REPE prefix.
+ Inst::kOptionRepne | // REPNE prefix.
+ Inst::kOptionLock | // LOCK prefix.
+ Inst::kOptionXAcquire | // XACQUIRE prefix.
+ Inst::kOptionXRelease ; // XRELEASE prefix.
+
+ Error err;
+
+ Opcode opcode; // Instruction opcode.
+ uint32_t options; // Instruction options.
+ uint32_t isign3; // A combined signature of first 3 operands.
+
+ const Operand_* rmRel; // Memory operand or operand that holds Label|Imm.
+ uint32_t rmInfo; // Memory operand's info based on x86MemInfo.
+ uint32_t rbReg; // Memory base or modRM register.
+ uint32_t rxReg; // Memory index register.
+ uint32_t opReg; // ModR/M opcode or register id.
+
+ LabelEntry* label; // Label entry.
+ RelocEntry* re = nullptr; // Relocation entry.
+  int32_t relOffset; // Relative offset.
+ FastUInt8 relSize = 0; // Relative size.
+  uint8_t* memOpAOMark = nullptr; // Marks the buffer position just before the address-override prefix is emitted.
+
+ int64_t immValue = 0; // Immediate value (must be 64-bit).
+ FastUInt8 immSize = 0; // Immediate size.
+
+ X86BufferWriter writer(this);
+
+ if (instId >= Inst::_kIdCount)
+ instId = 0;
+
+ const InstDB::InstInfo* instInfo = &InstDB::_instInfoTable[instId];
+ const InstDB::CommonInfo* commonInfo = &instInfo->commonInfo();
+
+ // Signature of the first 3 operands.
+ isign3 = o0.opType() + (o1.opType() << 3) + (o2.opType() << 6);
+
+ // Combine all instruction options and also check whether the instruction
+ // is valid. All options that require special handling (including invalid
+ // instruction) are handled by the next branch.
+ options = uint32_t(instId == 0);
+ options |= uint32_t((size_t)(_bufferEnd - writer.cursor()) < 16);
+ options |= uint32_t(instOptions() | globalInstOptions());
+
+ // Handle failure and rare cases first.
+ if (ASMJIT_UNLIKELY(options & kRequiresSpecialHandling)) {
+ if (ASMJIT_UNLIKELY(!_code))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ // Unknown instruction.
+ if (ASMJIT_UNLIKELY(instId == 0))
+ goto InvalidInstruction;
+
+ // Grow request, happens rarely.
+ err = writer.ensureSpace(this, 16);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+#ifndef ASMJIT_NO_VALIDATION
+ // Strict validation.
+ if (hasEmitterOption(kOptionStrictValidation)) {
+ Operand_ opArray[Globals::kMaxOpCount];
+
+ opArray[0].copyFrom(o0);
+ opArray[1].copyFrom(o1);
+ opArray[2].copyFrom(o2);
+ opArray[3].copyFrom(o3);
+
+ if (options & Inst::kOptionOp4Op5Used) {
+ opArray[4].copyFrom(_op4);
+ opArray[5].copyFrom(_op5);
+ }
+ else {
+ opArray[4].reset();
+ opArray[5].reset();
+ }
+
+ err = InstAPI::validate(archId(), BaseInst(instId, options, _extraReg), opArray, Globals::kMaxOpCount);
+ if (ASMJIT_UNLIKELY(err)) goto Failed;
+ }
+#endif
+
+ uint32_t iFlags = instInfo->flags();
+
+ // LOCK, XACQUIRE, and XRELEASE prefixes.
+ if (options & Inst::kOptionLock) {
+ bool xAcqRel = (options & (Inst::kOptionXAcquire | Inst::kOptionXRelease)) != 0;
+
+ if (ASMJIT_UNLIKELY(!(iFlags & (InstDB::kFlagLock)) && !xAcqRel))
+ goto InvalidLockPrefix;
+
+ if (xAcqRel) {
+ if (ASMJIT_UNLIKELY((options & Inst::kOptionXAcquire) && !(iFlags & InstDB::kFlagXAcquire)))
+ goto InvalidXAcquirePrefix;
+
+ if (ASMJIT_UNLIKELY((options & Inst::kOptionXRelease) && !(iFlags & InstDB::kFlagXRelease)))
+ goto InvalidXReleasePrefix;
+
+ writer.emit8((options & Inst::kOptionXAcquire) ? 0xF2 : 0xF3);
+ }
+
+ writer.emit8(0xF0);
+ }
+
+ // REP and REPNE prefixes.
+ if (options & (Inst::kOptionRep | Inst::kOptionRepne)) {
+ if (ASMJIT_UNLIKELY(!(iFlags & InstDB::kFlagRep)))
+ goto InvalidRepPrefix;
+
+ if (_extraReg.isReg() && ASMJIT_UNLIKELY(_extraReg.group() != Reg::kGroupGp || _extraReg.id() != Gp::kIdCx))
+ goto InvalidRepPrefix;
+
+ writer.emit8((options & Inst::kOptionRepne) ? 0xF2 : 0xF3);
+ }
+ }
+
+ // This sequence seems to be the fastest.
+ opcode = InstDB::_mainOpcodeTable[instInfo->_mainOpcodeIndex];
+ opReg = opcode.extractO();
+ opcode |= instInfo->_mainOpcodeValue;
+
+ // --------------------------------------------------------------------------
+ // [Encoding Scope]
+ // --------------------------------------------------------------------------
+
+ switch (instInfo->_encoding) {
+ case InstDB::kEncodingNone:
+ goto EmitDone;
+
+ // ------------------------------------------------------------------------
+ // [X86]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingX86Op:
+ goto EmitX86Op;
+
+ case InstDB::kEncodingX86Op_O_I8:
+ if (ASMJIT_UNLIKELY(isign3 != ENC_OPS1(Imm)))
+ goto InvalidInstruction;
+
+ immValue = o0.as<Imm>().u8();
+ immSize = 1;
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86Op_O:
+ rbReg = 0;
+ goto EmitX86R;
+
+ case InstDB::kEncodingX86Op_xAddr:
+ if (ASMJIT_UNLIKELY(!o0.isReg()))
+ goto InvalidInstruction;
+
+ rmInfo = x86MemInfo[o0.as<Reg>().type()];
+ writer.emitAddressOverride((rmInfo & _addressOverrideMask()) != 0);
+ goto EmitX86Op;
+
+ case InstDB::kEncodingX86Op_xAX:
+ if (isign3 == 0)
+ goto EmitX86Op;
+
+ if (isign3 == ENC_OPS1(Reg) && o0.id() == Gp::kIdAx)
+ goto EmitX86Op;
+ break;
+
+ case InstDB::kEncodingX86Op_xDX_xAX:
+ if (isign3 == 0)
+ goto EmitX86Op;
+
+ if (isign3 == ENC_OPS2(Reg, Reg) && o0.id() == Gp::kIdDx && o1.id() == Gp::kIdAx)
+ goto EmitX86Op;
+ break;
+
+ case InstDB::kEncodingX86Op_MemZAX:
+ if (isign3 == 0)
+ goto EmitX86Op;
+
+ rmRel = &o0;
+ if (isign3 == ENC_OPS1(Mem) && x86IsImplicitMem(o0, Gp::kIdAx))
+ goto EmitX86OpImplicitMem;
+
+ break;
+
+ case InstDB::kEncodingX86I_xAX:
+ // Implicit form.
+ if (isign3 == ENC_OPS1(Imm)) {
+ immValue = o0.as<Imm>().u8();
+ immSize = 1;
+ goto EmitX86Op;
+ }
+
+ // Explicit form.
+ if (isign3 == ENC_OPS2(Reg, Imm) && o0.id() == Gp::kIdAx) {
+ immValue = o1.as<Imm>().u8();
+ immSize = 1;
+ goto EmitX86Op;
+ }
+ break;
+
+ case InstDB::kEncodingX86M:
+ opcode.addPrefixBySize(o0.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86M_NoSize:
+ rbReg = o0.id();
+ if (isign3 == ENC_OPS1(Reg))
+ goto EmitX86R;
+
+ rmRel = &o0;
+ if (isign3 == ENC_OPS1(Mem))
+ goto EmitX86M;
+ break;
+
+ case InstDB::kEncodingX86M_GPB_MulDiv:
+CaseX86M_GPB_MulDiv:
+ // Explicit form?
+ if (isign3 > 0x7) {
+ // [AX] <- [AX] div|mul r8.
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(!Reg::isGpw(o0, Gp::kIdAx) || !Reg::isGpb(o1)))
+ goto InvalidInstruction;
+
+ rbReg = o1.id();
+ FIXUP_GPB(o1, rbReg);
+ goto EmitX86R;
+ }
+
+ // [AX] <- [AX] div|mul m8.
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ if (ASMJIT_UNLIKELY(!Reg::isGpw(o0, Gp::kIdAx)))
+ goto InvalidInstruction;
+
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // [?DX:?AX] <- [?DX:?AX] div|mul r16|r32|r64
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o0.size() != o1.size()))
+ goto InvalidInstruction;
+
+ opcode.addArithBySize(o0.size());
+ rbReg = o2.id();
+ goto EmitX86R;
+ }
+
+ // [?DX:?AX] <- [?DX:?AX] div|mul m16|m32|m64
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ if (ASMJIT_UNLIKELY(o0.size() != o1.size()))
+ goto InvalidInstruction;
+
+ opcode.addArithBySize(o0.size());
+ rmRel = &o2;
+ goto EmitX86M;
+ }
+
+ goto InvalidInstruction;
+ }
+
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86M_GPB:
+ if (isign3 == ENC_OPS1(Reg)) {
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+
+ if (o0.size() != 1)
+ goto EmitX86R;
+
+ FIXUP_GPB(o0, rbReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 0))
+ goto AmbiguousOperandSize;
+
+ opcode.addArithBySize(o0.size());
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86M_Only:
+ if (isign3 == ENC_OPS1(Mem)) {
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86M_Nop:
+ if (isign3 == ENC_OPS1(None))
+ goto EmitX86Op;
+
+ // Multi-byte NOP instruction "0F 1F /0".
+ opcode = Opcode::k000F00 | 0x1F;
+ opReg = 0;
+
+ if (isign3 == ENC_OPS1(Reg)) {
+ opcode.add66hBySize(o0.size());
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ opcode.add66hBySize(o0.size());
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86R_Native:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+ break;
+
+ case InstDB::kEncodingX86Rm:
+ opcode.addPrefixBySize(o0.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86Rm_NoSize:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Rm_Raw66H:
+      // We normally emit either [66|F2|F3]; this instruction requires 66+[F2|F3].
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (o0.size() == 2)
+ writer.emit8(0x66);
+ else
+ opcode.addWBySize(o0.size());
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+
+ if (o0.size() == 2)
+ writer.emit8(0x66);
+ else
+ opcode.addWBySize(o0.size());
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Mr:
+ opcode.addPrefixBySize(o0.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86Mr_NoSize:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ rbReg = o0.id();
+ opReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ rmRel = &o0;
+ opReg = o1.id();
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Arith:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
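+      // Arithmetic opcodes come in pairs - the base encodes the MR form
+      // ('r/m, reg') and base+2 the RM form ('reg, r/m'); RM is the default.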
+ opcode += 2;
+ opcode.addArithBySize(o0.size());
+
+ if (o0.size() != o1.size())
+ goto OperandSizeMismatch;
+
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (o0.size() == 1) {
+ FIXUP_GPB(o0, opReg);
+ FIXUP_GPB(o1, rbReg);
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode -= 2;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+ else {
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode -= 2;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode += 2;
+ opcode.addArithBySize(o0.size());
+
+ opReg = o0.id();
+ rmRel = &o1;
+
+ if (o0.size() != 1)
+ goto EmitX86M;
+
+ FIXUP_GPB(o0, opReg);
+ goto EmitX86M;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addArithBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+
+ if (o1.size() != 1)
+ goto EmitX86M;
+
+ FIXUP_GPB(o1, opReg);
+ goto EmitX86M;
+ }
+
+ // The remaining instructions use 0x80 opcode.
+ opcode = 0x80;
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ uint32_t size = o0.size();
+
+ rbReg = o0.id();
+ immValue = o1.as<Imm>().i64();
+
+ if (size == 1) {
+ FIXUP_GPB(o0, rbReg);
+ immSize = 1;
+ }
+ else {
+ if (size == 2) {
+ opcode |= Opcode::kPP_66;
+ }
+ else if (size == 4) {
+ // Sign extend so isInt8 returns the right result.
+ immValue = x86SignExtendI32<int64_t>(immValue);
+ }
+ else if (size == 8) {
+ bool canTransformTo32Bit = instId == Inst::kIdAnd && Support::isUInt32(immValue);
+
+ if (!Support::isInt32(immValue)) {
+            // We would do this by default when `kOptionOptimizedForSize` is
+            // enabled; however, in this case we force it, since otherwise we
+            // would have to fail.
+ if (canTransformTo32Bit)
+ size = 4;
+ else
+ goto InvalidImmediate;
+ }
+ else if (canTransformTo32Bit && hasEmitterOption(kOptionOptimizedForSize)) {
+ // This is a code-size optimization.
+ size = 4;
+ }
+
+ opcode.addWBySize(size);
+ }
+
+ immSize = FastUInt8(Support::min<uint32_t>(size, 4));
+ if (Support::isInt8(immValue) && !(options & Inst::kOptionLongForm))
+ immSize = 1;
+ }
+
+ // Short form - AL, AX, EAX, RAX.
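+      // (each arithmetic group encodes 'al, imm8' at base+4 and
+      // 'ax|eax|rax, imm' at base+5, selected here by `0x04 + (size != 1)`).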
+ if (rbReg == 0 && (size == 1 || immSize != 1) && !(options & Inst::kOptionLongForm)) {
+ opcode &= Opcode::kPP_66 | Opcode::kW;
+ opcode |= ((opReg << 3) | (0x04 + (size != 1)));
+ immSize = FastUInt8(Support::min<uint32_t>(size, 4));
+ goto EmitX86Op;
+ }
+
+ opcode += size != 1 ? (immSize != 1 ? 1 : 3) : 0;
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Imm)) {
+ uint32_t memSize = o0.size();
+
+ if (ASMJIT_UNLIKELY(memSize == 0))
+ goto AmbiguousOperandSize;
+
+ immValue = o1.as<Imm>().i64();
+ immSize = FastUInt8(Support::min<uint32_t>(memSize, 4));
+
+ // Sign extend so isInt8 returns the right result.
+ if (memSize == 4)
+ immValue = x86SignExtendI32<int64_t>(immValue);
+
+ if (Support::isInt8(immValue) && !(options & Inst::kOptionLongForm))
+ immSize = 1;
+
+ opcode += memSize != 1 ? (immSize != 1 ? 1 : 3) : 0;
+ opcode.addPrefixBySize(memSize);
+
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Bswap:
+ if (isign3 == ENC_OPS1(Reg)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 1))
+ goto InvalidInstruction;
+
+ opReg = o0.id();
+ opcode.addPrefixBySize(o0.size());
+ goto EmitX86OpReg;
+ }
+ break;
+
+ case InstDB::kEncodingX86Bt:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opcode.addPrefixBySize(o1.size());
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addPrefixBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+
+ // The remaining instructions use the secondary opcode/r.
+ immValue = o1.as<Imm>().i64();
+ immSize = 1;
+
+ opcode = x86AltOpcodeOf(instInfo);
+ opcode.addPrefixBySize(o0.size());
+ opReg = opcode.extractO();
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Imm)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 0))
+ goto AmbiguousOperandSize;
+
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Call:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ rmRel = &o0;
+ if (isign3 == ENC_OPS1(Mem))
+ goto EmitX86M;
+
+      // Call with a 32-bit displacement uses the 0xE8 opcode. Call with an
+      // 8-bit displacement is not encodable, so the alternative opcode field
+      // in X86DB must be zero.
+ opcode = 0xE8;
+ opReg = 0;
+ goto EmitJmpCall;
+
+ case InstDB::kEncodingX86Cmpxchg: {
+ // Convert explicit to implicit.
+ if (isign3 & (0x7 << 6)) {
+ if (!Reg::isGp(o2) || o2.id() != Gp::kIdAx)
+ goto InvalidInstruction;
+ isign3 &= 0x3F;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (o0.size() != o1.size())
+ goto OperandSizeMismatch;
+
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+ opReg = o1.id();
+
+ if (o0.size() != 1)
+ goto EmitX86R;
+
+ FIXUP_GPB(o0, rbReg);
+ FIXUP_GPB(o1, opReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addArithBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+
+ if (o1.size() != 1)
+ goto EmitX86M;
+
+        FIXUP_GPB(o1, opReg);
+ goto EmitX86M;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingX86Cmpxchg8b_16b: {
+ if (isign3 == ENC_OPS3(Mem, Reg, Reg)) {
+ if (o3.isReg() && _op4.isReg()) {
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingX86Crc:
+ opReg = o0.id();
+ opcode.addWBySize(o0.size());
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ rbReg = o1.id();
+
+ if (o1.size() == 1) {
+ FIXUP_GPB(o1, rbReg);
+ goto EmitX86R;
+ }
+ else {
+        // This seems to be the only case where a '66F2' prefix pair is encoded.
+ if (o1.size() == 2) writer.emit8(0x66);
+
+ opcode.add(1);
+ goto EmitX86R;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ rmRel = &o1;
+ if (o1.size() == 0)
+ goto AmbiguousOperandSize;
+
+      // This seems to be the only case where a '66F2' prefix pair is encoded.
+ if (o1.size() == 2) writer.emit8(0x66);
+
+ opcode += o1.size() != 1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Enter:
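+    // ENTER imm16, imm8 - a 16-bit stack-frame size followed by an 8-bit
+    // nesting level, emitted as a single 3-byte immediate.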
+ if (isign3 == ENC_OPS2(Imm, Imm)) {
+ uint32_t iw = o0.as<Imm>().u16();
+ uint32_t ib = o1.as<Imm>().u8();
+
+ immValue = iw | (ib << 16);
+ immSize = 3;
+ goto EmitX86Op;
+ }
+ break;
+
+ case InstDB::kEncodingX86Imul:
+      // First process all forms distinct from `kEncodingX86M_GPB_MulDiv`.
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opcode = 0x6B;
+ opcode.addPrefixBySize(o0.size());
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (!Support::isInt8(immValue) || (options & Inst::kOptionLongForm)) {
+ opcode -= 2;
+ immSize = o0.size() == 2 ? 2 : 4;
+ }
+
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opcode = 0x6B;
+ opcode.addPrefixBySize(o0.size());
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ // Sign extend so isInt8 returns the right result.
+ if (o0.size() == 4)
+ immValue = x86SignExtendI32<int64_t>(immValue);
+
+ if (!Support::isInt8(immValue) || (options & Inst::kOptionLongForm)) {
+ opcode -= 2;
+ immSize = o0.size() == 2 ? 2 : 4;
+ }
+
+ opReg = o0.id();
+ rmRel = &o1;
+
+ goto EmitX86M;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ // Must be explicit 'ax, r8' form.
+ if (o1.size() == 1)
+ goto CaseX86M_GPB_MulDiv;
+
+ if (o0.size() != o1.size())
+ goto OperandSizeMismatch;
+
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ opcode = Opcode::k000F00 | 0xAF;
+ opcode.addPrefixBySize(o0.size());
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ // Must be explicit 'ax, m8' form.
+ if (o1.size() == 1)
+ goto CaseX86M_GPB_MulDiv;
+
+ opReg = o0.id();
+ rmRel = &o1;
+
+ opcode = Opcode::k000F00 | 0xAF;
+ opcode.addPrefixBySize(o0.size());
+ goto EmitX86M;
+ }
+
+ // Shorthand to imul 'reg, reg, imm'.
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ opcode = 0x6B;
+ opcode.addPrefixBySize(o0.size());
+
+ immValue = o1.as<Imm>().i64();
+ immSize = 1;
+
+ // Sign extend so isInt8 returns the right result.
+ if (o0.size() == 4)
+ immValue = x86SignExtendI32<int64_t>(immValue);
+
+ if (!Support::isInt8(immValue) || (options & Inst::kOptionLongForm)) {
+ opcode -= 2;
+ immSize = o0.size() == 2 ? 2 : 4;
+ }
+
+ opReg = rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ // Try implicit form.
+ goto CaseX86M_GPB_MulDiv;
+
+ case InstDB::kEncodingX86In:
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdAx))
+ goto InvalidInstruction;
+
+ immValue = o1.as<Imm>().u8();
+ immSize = 1;
+
+ opcode = x86AltOpcodeOf(instInfo) + (o0.size() != 1);
+ opcode.add66hBySize(o0.size());
+ goto EmitX86Op;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdAx || o1.id() != Gp::kIdDx))
+ goto InvalidInstruction;
+
+ opcode += o0.size() != 1;
+ opcode.add66hBySize(o0.size());
+ goto EmitX86Op;
+ }
+ break;
+
+ case InstDB::kEncodingX86Ins:
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ if (ASMJIT_UNLIKELY(!x86IsImplicitMem(o0, Gp::kIdDi) || o1.id() != Gp::kIdDx))
+ goto InvalidInstruction;
+
+ uint32_t size = o0.size();
+ if (ASMJIT_UNLIKELY(size == 0))
+ goto AmbiguousOperandSize;
+
+ rmRel = &o0;
+ opcode += (size != 1);
+
+ opcode.add66hBySize(size);
+ goto EmitX86OpImplicitMem;
+ }
+ break;
+
+ case InstDB::kEncodingX86IncDec:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+
+ if (o0.size() == 1) {
+ FIXUP_GPB(o0, rbReg);
+ goto EmitX86R;
+ }
+
+ if (is32Bit()) {
+ // INC r16|r32 is only encodable in 32-bit mode (collides with REX).
+ opcode = x86AltOpcodeOf(instInfo) + (rbReg & 0x07);
+ opcode.add66hBySize(o0.size());
+ goto EmitX86Op;
+ }
+ else {
+ opcode.addArithBySize(o0.size());
+ goto EmitX86R;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ opcode.addArithBySize(o0.size());
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Int:
+ if (isign3 == ENC_OPS1(Imm)) {
+ immValue = o0.as<Imm>().i64();
+ immSize = 1;
+ goto EmitX86Op;
+ }
+ break;
+
+ case InstDB::kEncodingX86Jcc:
+ if (_emitterOptions & kOptionPredictedJumps) {
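+        // 0x3E and 0x2E are the DS and CS segment-override bytes, which the
+        // CPU also interprets as branch-taken and branch-not-taken hints.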
+ if (options & Inst::kOptionTaken)
+ writer.emit8(0x3E);
+ if (options & Inst::kOptionNotTaken)
+ writer.emit8(0x2E);
+ }
+
+ rmRel = &o0;
+ opReg = 0;
+ goto EmitJmpCall;
+
+ case InstDB::kEncodingX86JecxzLoop:
+ rmRel = &o0;
+ // Explicit jecxz|loop [r|e]cx, dst
+ if (o0.isReg()) {
+ if (ASMJIT_UNLIKELY(!Reg::isGp(o0, Gp::kIdCx)))
+ goto InvalidInstruction;
+
+ writer.emitAddressOverride((is32Bit() && o0.size() == 2) || (is64Bit() && o0.size() == 4));
+ rmRel = &o1;
+ }
+
+ opReg = 0;
+ goto EmitJmpCall;
+
+ case InstDB::kEncodingX86Jmp:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ rmRel = &o0;
+ if (isign3 == ENC_OPS1(Mem))
+ goto EmitX86M;
+
+      // A jump encoded with a 32-bit displacement uses the 0xE9 opcode; the
+      // opcode of a jump encoded with an 8-bit displacement is stored as an
+      // alternative opcode.
+ opcode = 0xE9;
+ opReg = 0;
+ goto EmitJmpCall;
+
+ case InstDB::kEncodingX86JmpRel:
+ rmRel = &o0;
+ goto EmitJmpCall;
+
+ case InstDB::kEncodingX86Lea:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode.addPrefixBySize(o0.size());
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Mov:
+ // Reg <- Reg
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ // Asmjit uses segment registers indexed from 1 to 6, leaving zero as
+ // "no segment register used". We have to fix this (decrement the index
+ // of the register) when emitting MOV instructions which move to/from
+ // a segment register. The segment register is always `opReg`, because
+ // the MOV instruction uses either RM or MR encoding.
+
+ // GP <- ??
+ if (Reg::isGp(o0)) {
+ // GP <- GP
+ if (Reg::isGp(o1)) {
+ uint32_t size0 = o0.size();
+ uint32_t size1 = o1.size();
+
+ if (size0 != size1) {
+            // We allow 'mov r64, r32' as it's basically a zero-extend.
+ if (size0 == 8 && size1 == 4)
+ size0 = 4; // Zero extend, don't promote to 64-bit.
+ else
+ goto InvalidInstruction;
+ }
+
+ if (size0 == 1) {
+ FIXUP_GPB(o0, opReg);
+ FIXUP_GPB(o1, rbReg);
+ opcode = 0x8A;
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode -= 2;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+ else {
+ opcode = 0x8B;
+ opcode.addPrefixBySize(size0);
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode -= 2;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+ }
+
+ opReg = rbReg;
+ rbReg = o0.id();
+
+ // GP <- SReg
+ if (Reg::isSReg(o1)) {
+ opcode = 0x8C;
+ opcode.addPrefixBySize(o0.size());
+ opReg--;
+ goto EmitX86R;
+ }
+
+ // GP <- CReg
+ if (Reg::isCReg(o1)) {
+ opcode = Opcode::k000F00 | 0x20;
+
+ // Use `LOCK MOV` in 32-bit mode if CR8+ register is accessed (AMD extension).
+ if ((opReg & 0x8) && is32Bit()) {
+ writer.emit8(0xF0);
+ opReg &= 0x7;
+ }
+ goto EmitX86R;
+ }
+
+ // GP <- DReg
+ if (Reg::isDReg(o1)) {
+ opcode = Opcode::k000F00 | 0x21;
+ goto EmitX86R;
+ }
+ }
+ else {
+ // ?? <- GP
+ if (!Reg::isGp(o1))
+ goto InvalidInstruction;
+
+ // SReg <- GP
+ if (Reg::isSReg(o0)) {
+ opcode = 0x8E;
+ opcode.addPrefixBySize(o1.size());
+ opReg--;
+ goto EmitX86R;
+ }
+
+ // CReg <- GP
+ if (Reg::isCReg(o0)) {
+ opcode = Opcode::k000F00 | 0x22;
+
+ // Use `LOCK MOV` in 32-bit mode if CR8+ register is accessed (AMD extension).
+ if ((opReg & 0x8) && is32Bit()) {
+ writer.emit8(0xF0);
+ opReg &= 0x7;
+ }
+ goto EmitX86R;
+ }
+
+ // DReg <- GP
+ if (Reg::isDReg(o0)) {
+ opcode = Opcode::k000F00 | 0x23;
+ goto EmitX86R;
+ }
+ }
+
+ goto InvalidInstruction;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+
+ // SReg <- Mem
+ if (Reg::isSReg(o0)) {
+ opcode = 0x8E;
+ opcode.addPrefixBySize(o1.size());
+ opReg--;
+ goto EmitX86M;
+ }
+ // Reg <- Mem
+ else {
+ opcode = 0;
+ opcode.addArithBySize(o0.size());
+
+ if (o0.size() == 1)
+ FIXUP_GPB(o0, opReg);
+
+ // Handle a special form of `mov al|ax|eax|rax, [ptr64]` that doesn't use MOD.
+ if (opReg == Gp::kIdAx && !rmRel->as<Mem>().hasBaseOrIndex()) {
+ immValue = rmRel->as<Mem>().offset();
+ if (x86GetMovAbsAddrType(this, writer, o0.size(), options, rmRel->as<Mem>()) == BaseMem::kAddrTypeAbs) {
+ opcode += 0xA0;
+ goto EmitX86OpMovAbs;
+ }
+ }
+
+ opcode += 0x8A;
+ goto EmitX86M;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+
+ // Mem <- SReg
+ if (Reg::isSReg(o1)) {
+ opcode = 0x8C;
+ opcode.addPrefixBySize(o0.size());
+ opReg--;
+ goto EmitX86M;
+ }
+ // Mem <- Reg
+ else {
+ opcode = 0;
+ opcode.addArithBySize(o1.size());
+
+ if (o1.size() == 1)
+ FIXUP_GPB(o1, opReg);
+
+ // Handle a special form of `mov [ptr64], al|ax|eax|rax` that doesn't use MOD.
+ if (opReg == Gp::kIdAx && !rmRel->as<Mem>().hasBaseOrIndex()) {
+ immValue = rmRel->as<Mem>().offset();
+ if (x86GetMovAbsAddrType(this, writer, o1.size(), options, rmRel->as<Mem>()) == BaseMem::kAddrTypeAbs) {
+ opcode += 0xA2;
+ goto EmitX86OpMovAbs;
+ }
+ }
+
+ opcode += 0x88;
+ goto EmitX86M;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ opReg = o0.id();
+ immSize = FastUInt8(o0.size());
+
+ if (immSize == 1) {
+ FIXUP_GPB(o0, opReg);
+
+ opcode = 0xB0;
+ immValue = o1.as<Imm>().u8();
+ goto EmitX86OpReg;
+ }
+ else {
+ // 64-bit immediate in 64-bit mode is allowed.
+ immValue = o1.as<Imm>().i64();
+
+ // Optimize the instruction size by using a 32-bit immediate if possible.
+ if (immSize == 8 && !(options & Inst::kOptionLongForm)) {
+ if (Support::isUInt32(immValue) && hasEmitterOption(kOptionOptimizedForSize)) {
+ // Zero-extend by using a 32-bit GPD destination instead of a 64-bit GPQ.
+ immSize = 4;
+ }
+ else if (Support::isInt32(immValue)) {
+ // Sign-extend, uses 'C7 /0' opcode.
+ rbReg = opReg;
+
+ opcode = Opcode::kW | 0xC7;
+ opReg = 0;
+
+ immSize = 4;
+ goto EmitX86R;
+ }
+ }
+
+ opcode = 0xB8;
+ opcode.addPrefixBySize(immSize);
+ goto EmitX86OpReg;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Imm)) {
+ uint32_t memSize = o0.size();
+ if (ASMJIT_UNLIKELY(memSize == 0))
+ goto AmbiguousOperandSize;
+
+ opcode = 0xC6 + (memSize != 1);
+ opcode.addPrefixBySize(memSize);
+ opReg = 0;
+ rmRel = &o0;
+
+ immValue = o1.as<Imm>().i64();
+ immSize = FastUInt8(Support::min<uint32_t>(memSize, 4));
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86MovsxMovzx:
+ opcode.add(o1.size() != 1);
+ opcode.addPrefixBySize(o0.size());
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (o1.size() != 1)
+ goto EmitX86R;
+
+ FIXUP_GPB(o1, rbReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86MovntiMovdiri:
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addWIf(Reg::isGpq(o1));
+
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86EnqcmdMovdir64b:
+ if (isign3 == ENC_OPS2(Mem, Mem)) {
+ const Mem& m0 = o0.as<Mem>();
+      // This is the only required validation; the rest is handled afterwards.
+ if (ASMJIT_UNLIKELY(m0.baseType() != o1.as<Mem>().baseType() ||
+ m0.hasIndex() ||
+ m0.hasOffset() ||
+ (m0.hasSegment() && m0.segmentId() != SReg::kIdEs)))
+ goto InvalidInstruction;
+
+ // The first memory operand is passed via register, the second memory operand is RM.
+ opReg = o0.as<Mem>().baseId();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Out:
+ if (isign3 == ENC_OPS2(Imm, Reg)) {
+ if (ASMJIT_UNLIKELY(o1.id() != Gp::kIdAx))
+ goto InvalidInstruction;
+
+ opcode = x86AltOpcodeOf(instInfo) + (o1.size() != 1);
+ opcode.add66hBySize(o1.size());
+
+ immValue = o0.as<Imm>().u8();
+ immSize = 1;
+ goto EmitX86Op;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdDx || o1.id() != Gp::kIdAx))
+ goto InvalidInstruction;
+
+ opcode.add(o1.size() != 1);
+ opcode.add66hBySize(o1.size());
+ goto EmitX86Op;
+ }
+ break;
+
+ case InstDB::kEncodingX86Outs:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdDx || !x86IsImplicitMem(o1, Gp::kIdSi)))
+ goto InvalidInstruction;
+
+ uint32_t size = o1.size();
+ if (ASMJIT_UNLIKELY(size == 0))
+ goto AmbiguousOperandSize;
+
+ rmRel = &o1;
+ opcode.add(size != 1);
+ opcode.add66hBySize(size);
+ goto EmitX86OpImplicitMem;
+ }
+ break;
+
+ case InstDB::kEncodingX86Push:
+ if (isign3 == ENC_OPS1(Reg)) {
+ if (Reg::isSReg(o0)) {
+ uint32_t segment = o0.id();
+ if (ASMJIT_UNLIKELY(segment >= SReg::kIdCount))
+ goto InvalidSegment;
+
+ opcode = x86OpcodePushSReg[segment];
+ goto EmitX86Op;
+ }
+ else {
+ goto CaseX86PushPop_Gp;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Imm)) {
+ immValue = o0.as<Imm>().i64();
+ immSize = 4;
+
+ if (Support::isInt8(immValue) && !(options & Inst::kOptionLongForm))
+ immSize = 1;
+
+ opcode = immSize == 1 ? 0x6A : 0x68;
+ goto EmitX86Op;
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86Pop:
+ if (isign3 == ENC_OPS1(Reg)) {
+ if (Reg::isSReg(o0)) {
+ uint32_t segment = o0.id();
+ if (ASMJIT_UNLIKELY(segment == SReg::kIdCs || segment >= SReg::kIdCount))
+ goto InvalidSegment;
+
+ opcode = x86OpcodePopSReg[segment];
+ goto EmitDone;
+ }
+ else {
+CaseX86PushPop_Gp:
+          // We allow 2-byte, 4-byte, and 8-byte register sizes, although PUSH
+          // and POP only allow 2 bytes or the native size. On 64-bit we simply
+          // PUSH/POP the 64-bit register even if a 32-bit register was given.
+ if (ASMJIT_UNLIKELY(o0.size() < 2))
+ goto InvalidInstruction;
+
+ opcode = x86AltOpcodeOf(instInfo);
+ opcode.add66hBySize(o0.size());
+ opReg = o0.id();
+ goto EmitX86OpReg;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 0))
+ goto AmbiguousOperandSize;
+
+ if (ASMJIT_UNLIKELY(o0.size() != 2 && o0.size() != gpSize()))
+ goto InvalidInstruction;
+
+ opcode.add66hBySize(o0.size());
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Ret:
+ if (isign3 == 0) {
+ // 'ret' without immediate, change C2 to C3.
+ opcode.add(1);
+ goto EmitX86Op;
+ }
+
+ if (isign3 == ENC_OPS1(Imm)) {
+ immValue = o0.as<Imm>().i64();
+ if (immValue == 0 && !(options & Inst::kOptionLongForm)) {
+ // 'ret' without immediate, change C2 to C3.
+ opcode.add(1);
+ goto EmitX86Op;
+ }
+ else {
+ immSize = 2;
+ goto EmitX86Op;
+ }
+ }
+ break;
+
+ case InstDB::kEncodingX86Rot:
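+    // Rotate/shift group: the base opcode encodes the 'by 1' form, +2
+    // selects the 'by CL' form, and -0x10 selects the 'by imm8' form.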
+ if (o0.isReg()) {
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+
+ if (o0.size() == 1)
+ FIXUP_GPB(o0, rbReg);
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o1.id() != Gp::kIdCx))
+ goto InvalidInstruction;
+
+ opcode += 2;
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ immValue = o1.as<Imm>().i64() & 0xFF;
+ immSize = 0;
+
+ if (immValue == 1 && !(options & Inst::kOptionLongForm))
+ goto EmitX86R;
+
+ opcode -= 0x10;
+ immSize = 1;
+ goto EmitX86R;
+ }
+ }
+ else {
+ opcode.addArithBySize(o0.size());
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ if (ASMJIT_UNLIKELY(o1.id() != Gp::kIdCx))
+ goto InvalidInstruction;
+
+ opcode += 2;
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Imm)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 0))
+ goto AmbiguousOperandSize;
+
+ rmRel = &o0;
+ immValue = o1.as<Imm>().i64() & 0xFF;
+ immSize = 0;
+
+ if (immValue == 1 && !(options & Inst::kOptionLongForm))
+ goto EmitX86M;
+
+ opcode -= 0x10;
+ immSize = 1;
+ goto EmitX86M;
+ }
+ }
+ break;
+
+ case InstDB::kEncodingX86Set:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ FIXUP_GPB(o0, rbReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86ShldShrd:
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opcode.addPrefixBySize(o0.size());
+ opReg = o1.id();
+ rbReg = o0.id();
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Imm)) {
+ opcode.addPrefixBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+ goto EmitX86M;
+ }
+
+ // The following instructions use opcode + 1.
+ opcode.add(1);
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o2.id() != Gp::kIdCx))
+ goto InvalidInstruction;
+
+ opcode.addPrefixBySize(o0.size());
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Reg)) {
+ if (ASMJIT_UNLIKELY(o2.id() != Gp::kIdCx))
+ goto InvalidInstruction;
+
+ opcode.addPrefixBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86StrRm:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ rmRel = &o1;
+ if (ASMJIT_UNLIKELY(rmRel->as<Mem>().offsetLo32() || !Reg::isGp(o0.as<Reg>(), Gp::kIdAx)))
+ goto InvalidInstruction;
+
+ uint32_t size = o0.size();
+ if (o1.hasSize() && ASMJIT_UNLIKELY(o1.size() != size))
+ goto OperandSizeMismatch;
+
+ opcode.addArithBySize(size);
+ goto EmitX86OpImplicitMem;
+ }
+ break;
+
+ case InstDB::kEncodingX86StrMr:
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ rmRel = &o0;
+ if (ASMJIT_UNLIKELY(rmRel->as<Mem>().offsetLo32() || !Reg::isGp(o1.as<Reg>(), Gp::kIdAx)))
+ goto InvalidInstruction;
+
+ uint32_t size = o1.size();
+ if (o0.hasSize() && ASMJIT_UNLIKELY(o0.size() != size))
+ goto OperandSizeMismatch;
+
+ opcode.addArithBySize(size);
+ goto EmitX86OpImplicitMem;
+ }
+ break;
+
+ case InstDB::kEncodingX86StrMm:
+ if (isign3 == ENC_OPS2(Mem, Mem)) {
+ if (ASMJIT_UNLIKELY(o0.as<Mem>().baseAndIndexTypes() !=
+ o1.as<Mem>().baseAndIndexTypes()))
+ goto InvalidInstruction;
+
+ rmRel = &o1;
+ if (ASMJIT_UNLIKELY(o0.as<Mem>().hasOffset()))
+ goto InvalidInstruction;
+
+ uint32_t size = o1.size();
+ if (ASMJIT_UNLIKELY(size == 0))
+ goto AmbiguousOperandSize;
+
+ if (ASMJIT_UNLIKELY(o0.size() != size))
+ goto OperandSizeMismatch;
+
+ opcode.addArithBySize(size);
+ goto EmitX86OpImplicitMem;
+ }
+ break;
+
+ case InstDB::kEncodingX86Test:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (o0.size() != o1.size())
+ goto OperandSizeMismatch;
+
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+ opReg = o1.id();
+
+ if (o0.size() != 1)
+ goto EmitX86R;
+
+ FIXUP_GPB(o0, rbReg);
+ FIXUP_GPB(o1, opReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addArithBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+
+ if (o1.size() != 1)
+ goto EmitX86M;
+
+ FIXUP_GPB(o1, opReg);
+ goto EmitX86M;
+ }
+
+ // The following instructions use the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+ opReg = opcode.extractO();
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+
+ if (o0.size() == 1) {
+ FIXUP_GPB(o0, rbReg);
+ immValue = o1.as<Imm>().u8();
+ immSize = 1;
+ }
+ else {
+ immValue = o1.as<Imm>().i64();
+ immSize = FastUInt8(Support::min<uint32_t>(o0.size(), 4));
+ }
+
+ // Short form - AL, AX, EAX, RAX.
+ if (rbReg == 0 && !(options & Inst::kOptionLongForm)) {
+ opcode &= Opcode::kPP_66 | Opcode::kW;
+ opcode |= 0xA8 + (o0.size() != 1);
+ goto EmitX86Op;
+ }
+
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Imm)) {
+ if (ASMJIT_UNLIKELY(o0.size() == 0))
+ goto AmbiguousOperandSize;
+
+ opcode.addArithBySize(o0.size());
+ rmRel = &o0;
+
+ immValue = o1.as<Imm>().i64();
+ immSize = FastUInt8(Support::min<uint32_t>(o0.size(), 4));
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Xchg:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode.addArithBySize(o0.size());
+ opReg = o0.id();
+ rmRel = &o1;
+
+ if (o0.size() != 1)
+ goto EmitX86M;
+
+ FIXUP_GPB(o0, opReg);
+ goto EmitX86M;
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingX86Xadd:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opcode.addArithBySize(o0.size());
+ rbReg = o0.id();
+ opReg = o1.id();
+
+ if (o0.size() != o1.size())
+ goto OperandSizeMismatch;
+
+ if (o0.size() == 1) {
+ FIXUP_GPB(o0, rbReg);
+ FIXUP_GPB(o1, opReg);
+ goto EmitX86R;
+ }
+
+ // Special opcode for 'xchg ?ax, reg'.
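+      // (single-byte '90+r' encoding; 0x90 with no register added is NOP).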
+ if (instId == Inst::kIdXchg && (opReg == 0 || rbReg == 0)) {
+ opcode &= Opcode::kPP_66 | Opcode::kW;
+ opcode |= 0x90;
+ // One of `xchg a, b` or `xchg b, a` is AX/EAX/RAX.
+ opReg += rbReg;
+ goto EmitX86OpReg;
+ }
+ else {
+ goto EmitX86R;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.addArithBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+
+ if (o1.size() == 1) {
+ FIXUP_GPB(o1, opReg);
+ }
+
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingX86Fence:
+ rbReg = 0;
+ goto EmitX86R;
+
+ case InstDB::kEncodingX86Bndmov:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ // ModRM encoding:
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ // ModMR encoding:
+ opcode = x86AltOpcodeOf(instInfo);
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode = x86AltOpcodeOf(instInfo);
+
+ rmRel = &o0;
+ opReg = o1.id();
+ goto EmitX86M;
+ }
+ break;
+
+ // ------------------------------------------------------------------------
+ // [FPU]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingFpuOp:
+ goto EmitFpuOp;
+
+ case InstDB::kEncodingFpuArith:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ // We switch to the alternative opcode if the first operand is zero.
+ if (opReg == 0) {
+CaseFpuArith_Reg:
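+        // 'fxxx st(0), st(i)' form - 0xD8 becomes the first opcode byte and
+        // the second byte comes from the opcode's FPU_2B field, offset by
+        // the stack-register index.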
+ opcode = ((0xD8 << Opcode::kFPU_2B_Shift) ) +
+ ((opcode >> Opcode::kFPU_2B_Shift) & 0xFF) + rbReg;
+ goto EmitFpuOp;
+ }
+ else if (rbReg == 0) {
+ rbReg = opReg;
+ opcode = ((0xDC << Opcode::kFPU_2B_Shift) ) +
+ ((opcode ) & 0xFF) + rbReg;
+ goto EmitFpuOp;
+ }
+ else {
+ goto InvalidInstruction;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+CaseFpuArith_Mem:
+      // 0xD8/0xDC, depending on the size of the memory operand; opReg is valid.
+ opcode = (o0.size() == 4) ? 0xD8 : 0xDC;
+ // Clear compressed displacement before going to EmitX86M.
+ opcode &= ~uint32_t(Opcode::kCDSHL_Mask);
+
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingFpuCom:
+ if (isign3 == 0) {
+ rbReg = 1;
+ goto CaseFpuArith_Reg;
+ }
+
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ goto CaseFpuArith_Reg;
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ goto CaseFpuArith_Mem;
+ }
+ break;
+
+ case InstDB::kEncodingFpuFldFst:
+ if (isign3 == ENC_OPS1(Mem)) {
+ rmRel = &o0;
+
+ if (o0.size() == 4 && commonInfo->hasFlag(InstDB::kFlagFpuM32)) {
+ goto EmitX86M;
+ }
+
+ if (o0.size() == 8 && commonInfo->hasFlag(InstDB::kFlagFpuM64)) {
+ opcode += 4;
+ goto EmitX86M;
+ }
+
+ if (o0.size() == 10 && commonInfo->hasFlag(InstDB::kFlagFpuM80)) {
+ opcode = x86AltOpcodeOf(instInfo);
+ opReg = opcode.extractO();
+ goto EmitX86M;
+ }
+ }
+
+ if (isign3 == ENC_OPS1(Reg)) {
+ if (instId == Inst::kIdFld ) { opcode = (0xD9 << Opcode::kFPU_2B_Shift) + 0xC0 + o0.id(); goto EmitFpuOp; }
+ if (instId == Inst::kIdFst ) { opcode = (0xDD << Opcode::kFPU_2B_Shift) + 0xD0 + o0.id(); goto EmitFpuOp; }
+ if (instId == Inst::kIdFstp) { opcode = (0xDD << Opcode::kFPU_2B_Shift) + 0xD8 + o0.id(); goto EmitFpuOp; }
+ }
+ break;
+
+ case InstDB::kEncodingFpuM:
+ if (isign3 == ENC_OPS1(Mem)) {
+ // Clear compressed displacement before going to EmitX86M.
+ opcode &= ~uint32_t(Opcode::kCDSHL_Mask);
+
+ rmRel = &o0;
+ if (o0.size() == 2 && commonInfo->hasFlag(InstDB::kFlagFpuM16)) {
+ opcode += 4;
+ goto EmitX86M;
+ }
+
+ if (o0.size() == 4 && commonInfo->hasFlag(InstDB::kFlagFpuM32)) {
+ goto EmitX86M;
+ }
+
+ if (o0.size() == 8 && commonInfo->hasFlag(InstDB::kFlagFpuM64)) {
+ opcode = x86AltOpcodeOf(instInfo) & ~uint32_t(Opcode::kCDSHL_Mask);
+ opReg = opcode.extractO();
+ goto EmitX86M;
+ }
+ }
+ break;
+
+ case InstDB::kEncodingFpuRDef:
+ if (isign3 == 0) {
+ opcode += 1;
+ goto EmitFpuOp;
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingFpuR:
+ if (isign3 == ENC_OPS1(Reg)) {
+ opcode += o0.id();
+ goto EmitFpuOp;
+ }
+ break;
+
+ case InstDB::kEncodingFpuStsw:
+ if (isign3 == ENC_OPS1(Reg)) {
+ if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdAx))
+ goto InvalidInstruction;
+
+ opcode = x86AltOpcodeOf(instInfo);
+ goto EmitFpuOp;
+ }
+
+ if (isign3 == ENC_OPS1(Mem)) {
+ // Clear compressed displacement before going to EmitX86M.
+ opcode &= ~uint32_t(Opcode::kCDSHL_Mask);
+
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ // ------------------------------------------------------------------------
+ // [Ext]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingExtPextrw:
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o1));
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Imm)) {
+ // Secondary opcode of 'pextrw' instruction (SSE4.1).
+ opcode = x86AltOpcodeOf(instInfo);
+ opcode.add66hIf(Reg::isXmm(o1));
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtExtract:
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o1));
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o1));
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtMov:
+ // GP|MM|XMM <- GP|MM|XMM
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (!(options & Inst::kOptionModMR) || !instInfo->_altOpcodeIndex)
+ goto EmitX86R;
+
+ opcode = x86AltOpcodeOf(instInfo);
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+
+ // GP|MM|XMM <- Mem
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ // Mem <- GP|MM|XMM
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtMovbe:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ if (o0.size() == 1)
+ goto InvalidInstruction;
+
+ opcode.addPrefixBySize(o0.size());
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ if (o1.size() == 1)
+ goto InvalidInstruction;
+
+ opcode.addPrefixBySize(o1.size());
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtMovd:
+CaseExtMovd:
+ opReg = o0.id();
+ opcode.add66hIf(Reg::isXmm(o0));
+
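+ // For example, 'movd xmm0, eax' is encoded as 66 0F 6E C0 - the 66 prefix
+ // selects the XMM form over the MMX one. The reversed direction below
+ // switches to the secondary opcode (0F 7E).
+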
+ // MM/XMM <- Gp
+ if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o1)) {
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ // MM/XMM <- Mem
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // The following instructions use the secondary opcode.
+ opcode &= Opcode::kW;
+ opcode |= x86AltOpcodeOf(instInfo);
+ opReg = o1.id();
+ opcode.add66hIf(Reg::isXmm(o1));
+
+ // GP <- MM/XMM
+ if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o0)) {
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+
+ // Mem <- MM/XMM
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ rmRel = &o0;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtMovq:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ // MM <- MM
+ if (Reg::isMm(o0) && Reg::isMm(o1)) {
+ opcode = Opcode::k000F00 | 0x6F;
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode += 0x10;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+
+ // XMM <- XMM
+ if (Reg::isXmm(o0) && Reg::isXmm(o1)) {
+ opcode = Opcode::kF30F00 | 0x7E;
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitX86R;
+
+ opcode = Opcode::k660F00 | 0xD6;
+ std::swap(opReg, rbReg);
+ goto EmitX86R;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+
+ // MM <- Mem
+ if (Reg::isMm(o0)) {
+ opcode = Opcode::k000F00 | 0x6F;
+ goto EmitX86M;
+ }
+
+ // XMM <- Mem
+ if (Reg::isXmm(o0)) {
+ opcode = Opcode::kF30F00 | 0x7E;
+ goto EmitX86M;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+
+ // Mem <- MM
+ if (Reg::isMm(o1)) {
+ opcode = Opcode::k000F00 | 0x7F;
+ goto EmitX86M;
+ }
+
+ // Mem <- XMM
+ if (Reg::isXmm(o1)) {
+ opcode = Opcode::k660F00 | 0xD6;
+ goto EmitX86M;
+ }
+ }
+
+ // In all other cases MOVQ is simply a MOVD instruction promoted to 64-bit.
+ opcode |= Opcode::kW;
+ goto CaseExtMovd;
+
+ case InstDB::kEncodingExtRm_XMM0:
+ if (ASMJIT_UNLIKELY(!o2.isNone() && !Reg::isXmm(o2, 0)))
+ goto InvalidInstruction;
+
+ isign3 &= 0x3F;
+ goto CaseExtRm;
+
+ case InstDB::kEncodingExtRm_ZDI:
+ if (ASMJIT_UNLIKELY(!o2.isNone() && !x86IsImplicitMem(o2, Gp::kIdDi)))
+ goto InvalidInstruction;
+
+ isign3 &= 0x3F;
+ goto CaseExtRm;
+
+ case InstDB::kEncodingExtRm_Wx:
+ opcode.addWIf(Reg::isGpq(o0) || o1.size() == 8);
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingExtRm:
+CaseExtRm:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtRm_P:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opcode.add66hIf(Reg::isXmm(o0) | Reg::isXmm(o1));
+
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode.add66hIf(Reg::isXmm(o0));
+
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtRmRi:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+ opReg = opcode.extractO();
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ immValue = o1.as<Imm>().i64();
+ immSize = 1;
+
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+ break;
+
+ case InstDB::kEncodingExtRmRi_P:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opcode.add66hIf(Reg::isXmm(o0) | Reg::isXmm(o1));
+
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode.add66hIf(Reg::isXmm(o0));
+
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+ opReg = opcode.extractO();
+
+ if (isign3 == ENC_OPS2(Reg, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o0));
+
+ immValue = o1.as<Imm>().i64();
+ immSize = 1;
+
+ rbReg = o0.id();
+ goto EmitX86R;
+ }
+ break;
+
+ case InstDB::kEncodingExtRmi:
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ case InstDB::kEncodingExtRmi_P:
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o0) | Reg::isXmm(o1));
+
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opcode.add66hIf(Reg::isXmm(o0));
+
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ // ------------------------------------------------------------------------
+ // [Extrq / Insertq (SSE4A)]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingExtExtrq:
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (isign3 == ENC_OPS2(Reg, Reg))
+ goto EmitX86R;
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS3(Reg, Imm, Imm)) {
+ immValue = (o1.as<Imm>().u32() ) +
+ (o2.as<Imm>().u32() << 8) ;
+ immSize = 2;
+
+ rbReg = opcode.extractO();
+ goto EmitX86R;
+ }
+ break;
+
+ case InstDB::kEncodingExtInsertq: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ if (isign4 == ENC_OPS2(Reg, Reg))
+ goto EmitX86R;
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Imm, Imm)) {
+ immValue = (o2.as<Imm>().u32() ) +
+ (o3.as<Imm>().u32() << 8) ;
+ immSize = 2;
+ goto EmitX86R;
+ }
+ break;
+ }
+
+ // ------------------------------------------------------------------------
+ // [3dNow]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingExt3dNow:
+ // Every 3dNow instruction starts with 0x0F0F and the actual opcode is
+ // stored as an 8-bit immediate.
+ immValue = opcode.v & 0xFFu;
+ immSize = 1;
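+
+ // For example, 'pfadd mm0, mm1' is emitted as 0F 0F C1 9E - the generic
+ // 3dNow opcode 0F 0F, a ModR/M byte C1 (MOD=3, reg=0, rm=1), and the PFADD
+ // selector 9E as the trailing immediate byte.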
+
+ opcode = Opcode::k000F00 | 0x0F;
+ opReg = o0.id();
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ rbReg = o1.id();
+ goto EmitX86R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ rmRel = &o1;
+ goto EmitX86M;
+ }
+ break;
+
+ // ------------------------------------------------------------------------
+ // [VEX/EVEX]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingVexOp:
+ goto EmitVexEvexOp;
+
+ case InstDB::kEncodingVexKmov:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+
+ // Form 'k, reg'.
+ if (Reg::isGp(o1)) {
+ opcode = x86AltOpcodeOf(instInfo);
+ goto EmitVexEvexR;
+ }
+
+ // Form 'reg, k'.
+ if (Reg::isGp(o0)) {
+ opcode = x86AltOpcodeOf(instInfo) + 1;
+ goto EmitVexEvexR;
+ }
+
+ // Form 'k, k'.
+ if (!(options & Inst::kOptionModMR))
+ goto EmitVexEvexR;
+
+ opcode.add(1);
+ std::swap(opReg, rbReg);
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+
+ goto EmitVexEvexM;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode.add(1);
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexR_Wx:
+ if (isign3 == ENC_OPS1(Reg)) {
+ rbReg = o0.id();
+ opcode.addWIf(o0.as<Reg>().isGpq());
+ goto EmitVexEvexR;
+ }
+ break;
+
+ case InstDB::kEncodingVexM:
+ if (isign3 == ENC_OPS1(Mem)) {
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexM_VM:
+ if (isign3 == ENC_OPS1(Mem)) {
+ opcode |= x86OpcodeLByVMem(o0);
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexMr_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexMr_VM:
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode |= Support::max(x86OpcodeLByVMem(o0), x86OpcodeLBySize(o1.size()));
+
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexMri_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexMri:
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Imm)) {
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRm_ZDI:
+ if (ASMJIT_UNLIKELY(!o2.isNone() && !x86IsImplicitMem(o2, Gp::kIdDi)))
+ goto InvalidInstruction;
+
+ isign3 &= 0x3F;
+ goto CaseVexRm;
+
+ case InstDB::kEncodingVexRm_Wx:
+ opcode.addWIf(Reg::isGpq(o0) | Reg::isGpq(o1));
+ goto CaseVexRm;
+
+ case InstDB::kEncodingVexRm_Lx_Bcst:
+ if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o1.as<Reg>())) {
+ opcode = x86AltOpcodeOf(instInfo) | x86OpcodeLBySize(o0.size() | o1.size());
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRm_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRm:
+CaseVexRm:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRm_VM:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size()));
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRm_T1_4X: {
+ if (!(options & Inst::kOptionOp4Op5Used))
+ goto InvalidInstruction;
+
+ if (Reg::isZmm(o0 ) && Reg::isZmm(o1) &&
+ Reg::isZmm(o2 ) && Reg::isZmm(o3) &&
+ Reg::isZmm(_op4) && _op5.isMem()) {
+
+ // Registers [o1, o2, o3, _op4] must be consecutive and the index of the
+ // first one must be a multiple of 4.
+ uint32_t i1 = o1.id();
+ uint32_t i2 = o2.id();
+ uint32_t i3 = o3.id();
+ uint32_t i4 = _op4.id();
+
+ if (ASMJIT_UNLIKELY((i1 & 0x3) != 0 || i2 != i1 + 1 || i3 != i1 + 2 || i4 != i1 + 3))
+ goto NotConsecutiveRegs;
+
+ opReg = o0.id();
+ rmRel = &_op5;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexRmi_Wx:
+ opcode.addWIf(Reg::isGpq(o0) | Reg::isGpq(o1));
+ goto CaseVexRmi;
+
+ case InstDB::kEncodingVexRmi_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRmi:
+CaseVexRmi:
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvm:
+CaseVexRvm:
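+ // The RVM form encodes the destination in ModR/M.reg, the first source
+ // register in VEX/EVEX.vvvv, and the second source in ModR/M.rm - e.g. in
+ // 'vaddps xmm0, xmm1, xmm2' the vvvv field selects xmm1.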
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+CaseVexRvm_R:
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvm_ZDX_Wx:
+ if (ASMJIT_UNLIKELY(!o3.isNone() && !Reg::isGp(o3, Gp::kIdDx)))
+ goto InvalidInstruction;
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvm_Wx:
+ opcode.addWIf(Reg::isGpq(o0) | (o2.size() == 8));
+ goto CaseVexRvm;
+
+ case InstDB::kEncodingVexRvm_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ goto CaseVexRvm;
+
+ case InstDB::kEncodingVexRvmr_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvmr: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+ immValue = o3.id() << 4;
+ immSize = 1;
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexRvmi_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvmi: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+ immValue = o3.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Mem, Imm)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexRmv_Wx:
+ opcode.addWIf(Reg::isGpq(o0) | Reg::isGpq(o2));
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRmv:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRmvRm_VM:
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opcode = x86AltOpcodeOf(instInfo);
+ opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size()));
+
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRmv_VM:
+ if (isign3 == ENC_OPS3(Reg, Mem, Reg)) {
+ opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size() | o2.size()));
+
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRmvi: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+ immValue = o3.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Imm)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Mem, Reg, Imm)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexMovdMovq:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ if (Reg::isGp(o0)) {
+ opcode = x86AltOpcodeOf(instInfo);
+ opcode.addWBySize(o0.size());
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitVexEvexR;
+ }
+
+ if (Reg::isGp(o1)) {
+ opcode.addWBySize(o1.size());
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ // If this is the 'W' version (movq) then also allow the vmovq 'xmm|xmm' form.
+ if (opcode & Opcode::kEvex_W_1) {
+ opcode &= ~(Opcode::kPP_VEXMask | Opcode::kMM_Mask | 0xFF);
+ opcode |= (Opcode::kF30F00 | 0x7E);
+
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ if (opcode & Opcode::kEvex_W_1) {
+ opcode &= ~(Opcode::kPP_VEXMask | Opcode::kMM_Mask | 0xFF);
+ opcode |= (Opcode::kF30F00 | 0x7E);
+ }
+
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ if (opcode & Opcode::kEvex_W_1) {
+ opcode &= ~(Opcode::kPP_VEXMask | Opcode::kMM_Mask | 0xFF);
+ opcode |= (Opcode::k660F00 | 0xD6);
+ }
+
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRmMr_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRmMr:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode &= Opcode::kLL_Mask;
+ opcode |= x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmRmv:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rbReg = o1.id();
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitVexEvexR;
+
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmRmi_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvmRmi:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+
+ // The following instructions use the secondary opcode.
+ opcode &= Opcode::kLL_Mask;
+ opcode |= x86AltOpcodeOf(instInfo);
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmRmvRmi:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rbReg = o1.id();
+
+ if (!(options & Inst::kOptionModMR))
+ goto EmitVexEvexR;
+
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o2.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+
+ // The following instructions use the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = o0.id();
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmMr:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+
+ // The following instructions use the secondary opcode.
+ opcode = x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = o1.id();
+ rbReg = o0.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmMvr_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvmMvr:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode &= Opcode::kLL_Mask;
+ opcode |= x86AltOpcodeOf(instInfo);
+
+ if (isign3 == ENC_OPS3(Mem, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o2.id(), o1.id());
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexRvmVmi_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvmVmi:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+ goto EmitVexEvexM;
+ }
+
+ // The following instruction uses the secondary opcode.
+ opcode &= Opcode::kLL_Mask;
+ opcode |= x86AltOpcodeOf(instInfo);
+ opReg = opcode.extractO();
+
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexVm_Wx:
+ opcode.addWIf(Reg::isGpq(o0) | Reg::isGpq(o1));
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexVm:
+ if (isign3 == ENC_OPS2(Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexEvexVmi_Lx:
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm))
+ opcode |= Opcode::kMM_ForceEvex;
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexVmi_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexVmi:
+ immValue = o2.as<Imm>().i64();
+ immSize = 1;
+
+CaseVexVmi_AfterImm:
+ if (isign3 == ENC_OPS3(Reg, Reg, Imm)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rbReg = o1.id();
+ goto EmitVexEvexR;
+ }
+
+ if (isign3 == ENC_OPS3(Reg, Mem, Imm)) {
+ opReg = x86PackRegAndVvvvv(opReg, o0.id());
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ case InstDB::kEncodingVexVmi4_Wx:
+ opcode.addWIf(Reg::isGpq(o0) || o1.size() == 8);
+ immValue = o2.as<Imm>().i64();
+ immSize = 4;
+ goto CaseVexVmi_AfterImm;
+
+ case InstDB::kEncodingVexRvrmRvmr_Lx:
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingVexRvrmRvmr: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+
+ immValue = o3.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem)) {
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o3;
+
+ immValue = o2.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexM;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+
+ immValue = o3.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexRvrmiRvmri_Lx: {
+ if (!(options & Inst::kOptionOp4Op5Used) || !_op4.isImm())
+ goto InvalidInstruction;
+
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size() | o2.size() | o3.size());
+
+ immValue = _op4.as<Imm>().u8() & 0x0F;
+ immSize = 1;
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+
+ immValue |= o3.id() << 4;
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem)) {
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o3;
+
+ immValue |= o2.id() << 4;
+ goto EmitVexEvexM;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+
+ immValue |= o3.id() << 4;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+
+ case InstDB::kEncodingVexMovssMovsd:
+ if (isign3 == ENC_OPS3(Reg, Reg, Reg)) {
+ goto CaseVexRvm_R;
+ }
+
+ if (isign3 == ENC_OPS2(Reg, Mem)) {
+ opReg = o0.id();
+ rmRel = &o1;
+ goto EmitVexEvexM;
+ }
+
+ if (isign3 == ENC_OPS2(Mem, Reg)) {
+ opcode = x86AltOpcodeOf(instInfo);
+ opReg = o1.id();
+ rmRel = &o0;
+ goto EmitVexEvexM;
+ }
+ break;
+
+ // ------------------------------------------------------------------------
+ // [FMA4]
+ // ------------------------------------------------------------------------
+
+ case InstDB::kEncodingFma4_Lx:
+ // It's fine to just check the first operand; the second is checked only for sanity.
+ opcode |= x86OpcodeLBySize(o0.size() | o1.size());
+ ASMJIT_FALLTHROUGH;
+
+ case InstDB::kEncodingFma4: {
+ const uint32_t isign4 = isign3 + (o3.opType() << 9);
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rbReg = o2.id();
+
+ immValue = o3.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexR;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Reg, Mem)) {
+ opcode.addW();
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o3;
+
+ immValue = o2.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexM;
+ }
+
+ if (isign4 == ENC_OPS4(Reg, Reg, Mem, Reg)) {
+ opReg = x86PackRegAndVvvvv(o0.id(), o1.id());
+ rmRel = &o2;
+
+ immValue = o3.id() << 4;
+ immSize = 1;
+ goto EmitVexEvexM;
+ }
+ break;
+ }
+ }
+
+ goto InvalidInstruction;
+
+ // --------------------------------------------------------------------------
+ // [Emit - X86]
+ // --------------------------------------------------------------------------
+
+EmitX86OpMovAbs:
+ immSize = FastUInt8(gpSize());
+ writer.emitSegmentOverride(rmRel->as<Mem>().segmentId());
+
+EmitX86Op:
+ // Emit mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ // Emit REX prefix (64-bit only).
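+ // The REX byte has the layout [0100WRXB]: W selects a 64-bit operand size
+ // and R/X/B extend the ModR/M.reg, SIB.index, and ModR/M.rm/opcode fields
+ // to 4 bits - e.g. plain 'mov rax, rcx' only needs REX.W, a 0x48 byte.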
+ {
+ uint32_t rex = opcode.extractRex(options);
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+ }
+
+ // Emit instruction opcodes.
+ writer.emitMMAndOpcode(opcode.v);
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+
+EmitX86OpReg:
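+ // This form encodes the register directly in the low 3 bits of the opcode
+ // (e.g. 'push rdx' is 52 = 50+2); registers 8-15 need REX.B, so 'push r10'
+ // becomes 41 52.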
+ // Emit mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ // Emit REX prefix (64-bit only).
+ {
+ uint32_t rex = opcode.extractRex(options) | (opReg >> 3); // Rex.B (0x01).
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+
+ opReg &= 0x7;
+ }
+
+ // Emit instruction opcodes.
+ opcode += opReg;
+ writer.emitMMAndOpcode(opcode.v);
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+
+EmitX86OpImplicitMem:
+ // NOTE: Don't change the emit order here, it's compatible with KeyStone/LLVM.
+ rmInfo = x86MemInfo[rmRel->as<Mem>().baseAndIndexTypes()];
+ if (ASMJIT_UNLIKELY(rmRel->as<Mem>().hasOffset() || (rmInfo & kX86MemInfo_Index)))
+ goto InvalidInstruction;
+
+ // Emit mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ // Emit REX prefix (64-bit only).
+ {
+ uint32_t rex = opcode.extractRex(options);
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+ }
+
+ writer.emitSegmentOverride(rmRel->as<Mem>().segmentId());
+ writer.emitAddressOverride((rmInfo & _addressOverrideMask()) != 0);
+
+ // Emit instruction opcodes.
+ writer.emitMMAndOpcode(opcode.v);
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+
+EmitX86R:
+ // Mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ // Rex prefix (64-bit only).
+ {
+ uint32_t rex = opcode.extractRex(options) |
+ ((opReg & 0x08) >> 1) | // REX.R (0x04).
+ ((rbReg ) >> 3) ; // REX.B (0x01).
+
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+
+ opReg &= 0x07;
+ rbReg &= 0x07;
+ }
+
+ // Instruction opcodes.
+ writer.emitMMAndOpcode(opcode.v);
+ // ModR/M byte (MOD=3 selects the register-direct form).
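+ // For example, with opcode 8B (mov r32, r/m32), opReg=0 (eax) and rbReg=1
+ // (ecx), this emits C1, so 'mov eax, ecx' becomes 8B C1.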
+ writer.emit8(x86EncodeMod(3, opReg, rbReg));
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+
+EmitX86M:
+ // `rmRel` operand must be memory.
+ ASMJIT_ASSERT(rmRel != nullptr);
+ ASMJIT_ASSERT(rmRel->opType() == Operand::kOpMem);
+ ASMJIT_ASSERT((opcode & Opcode::kCDSHL_Mask) == 0);
+
+ rmInfo = x86MemInfo[rmRel->as<Mem>().baseAndIndexTypes()];
+ writer.emitSegmentOverride(rmRel->as<Mem>().segmentId());
+
+ memOpAOMark = writer.cursor();
+ writer.emitAddressOverride((rmInfo & _addressOverrideMask()) != 0);
+
+ // Mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ rbReg = rmRel->as<Mem>().baseId();
+ rxReg = rmRel->as<Mem>().indexId();
+
+ // REX prefix (64-bit only).
+ {
+ uint32_t rex;
+
+ rex = (rbReg >> 3) & 0x01; // REX.B (0x01).
+ rex |= (rxReg >> 2) & 0x02; // REX.X (0x02).
+ rex |= (opReg >> 1) & 0x04; // REX.R (0x04).
+
+ rex &= rmInfo;
+ rex |= opcode.extractRex(options);
+
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+
+ opReg &= 0x07;
+ }
+
+ // Instruction opcodes.
+ writer.emitMMAndOpcode(opcode.v);
+ // ... Fall through ...
+
+ // --------------------------------------------------------------------------
+ // [Emit - MOD/SIB]
+ // --------------------------------------------------------------------------
+
+EmitModSib:
+ if (!(rmInfo & (kX86MemInfo_Index | kX86MemInfo_67H_X86))) {
+ // ==========|> [BASE + DISP8|DISP32].
+ if (rmInfo & kX86MemInfo_BaseGp) {
+ rbReg &= 0x7;
+ relOffset = rmRel->as<Mem>().offsetLo32();
+
+ uint32_t mod = x86EncodeMod(0, opReg, rbReg);
+ if (rbReg == Gp::kIdSp) {
+ // [XSP|R12].
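+ // ESP|R12 as BASE always needs a SIB byte: 'mov eax, [esp]' is 8B 04 24,
+ // where 04 is MOD=0/RM=4 (SIB follows) and 24 is SIB(0, 4, 4) - an index
+ // of 4 means "no index".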
+ if (relOffset == 0) {
+ writer.emit8(mod);
+ writer.emit8(x86EncodeSib(0, 4, 4));
+ }
+ // [XSP|R12 + DISP8|DISP32].
+ else {
+ uint32_t cdShift = (opcode & Opcode::kCDSHL_Mask) >> Opcode::kCDSHL_Shift;
+ int32_t cdOffset = relOffset >> cdShift;
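+
+ // AVX-512 compressed displacement (DISP8*N): the 8-bit displacement is
+ // stored pre-scaled by the access size, so with cdShift=6 (a full 64-byte
+ // ZMM access) a byte offset of 128 is stored as DISP8 = 2.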
+
+ if (Support::isInt8(cdOffset) && relOffset == int32_t(uint32_t(cdOffset) << cdShift)) {
+ writer.emit8(mod + 0x40); // <- MOD(1, opReg, rbReg).
+ writer.emit8(x86EncodeSib(0, 4, 4));
+ writer.emit8(cdOffset & 0xFF);
+ }
+ else {
+ writer.emit8(mod + 0x80); // <- MOD(2, opReg, rbReg).
+ writer.emit8(x86EncodeSib(0, 4, 4));
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ }
+ }
+ else if (rbReg != Gp::kIdBp && relOffset == 0) {
+ // [BASE].
+ writer.emit8(mod);
+ }
+ else {
+ // [BASE + DISP8|DISP32].
+ uint32_t cdShift = (opcode & Opcode::kCDSHL_Mask) >> Opcode::kCDSHL_Shift;
+ int32_t cdOffset = relOffset >> cdShift;
+
+ if (Support::isInt8(cdOffset) && relOffset == int32_t(uint32_t(cdOffset) << cdShift)) {
+ writer.emit8(mod + 0x40);
+ writer.emit8(cdOffset & 0xFF);
+ }
+ else {
+ writer.emit8(mod + 0x80);
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ }
+ }
+ // ==========|> [ABSOLUTE | DISP32].
+ else if (!(rmInfo & (kX86MemInfo_BaseLabel | kX86MemInfo_BaseRip))) {
+ uint32_t addrType = rmRel->as<Mem>().addrType();
+ relOffset = rmRel->as<Mem>().offsetLo32();
+
+ if (is32Bit()) {
+ // Explicit relative addressing doesn't work in 32-bit mode.
+ if (ASMJIT_UNLIKELY(addrType == BaseMem::kAddrTypeRel))
+ goto InvalidAddress;
+
+ writer.emit8(x86EncodeMod(0, opReg, 5));
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ else {
+ bool isOffsetI32 = rmRel->as<Mem>().offsetHi32() == (relOffset >> 31);
+ bool isOffsetU32 = rmRel->as<Mem>().offsetHi32() == 0;
+ uint64_t baseAddress = codeInfo().baseAddress();
+
+ // If relative addressing was not explicitly set then we can try to guess.
+ // By guessing we check some properties of the memory operand and try to
+ // base the decision on the segment prefix and the address type.
+ if (addrType == BaseMem::kAddrTypeDefault) {
+ if (baseAddress == Globals::kNoBaseAddress) {
+ // Prefer absolute addressing mode if the offset is 32-bit.
+ addrType = isOffsetI32 || isOffsetU32 ? BaseMem::kAddrTypeAbs
+ : BaseMem::kAddrTypeRel;
+ }
+ else {
+ // Prefer absolute addressing mode if FS|GS segment override is present.
+ bool hasFsGs = rmRel->as<Mem>().segmentId() >= SReg::kIdFs;
+ // Prefer absolute addressing mode if this is LEA with 32-bit immediate.
+ bool isLea32 = (instId == Inst::kIdLea) && (isOffsetI32 || isOffsetU32);
+
+ addrType = hasFsGs || isLea32 ? BaseMem::kAddrTypeAbs
+ : BaseMem::kAddrTypeRel;
+ }
+ }
+
+ if (addrType == BaseMem::kAddrTypeRel) {
+ uint32_t kModRel32Size = 5;
+ uint64_t virtualOffset = uint64_t(writer.offsetFrom(_bufferData)) + immSize + kModRel32Size;
+
+ if (baseAddress == Globals::kNoBaseAddress) {
+ // Create a new RelocEntry as we cannot calculate the offset right now.
+ err = _code->newRelocEntry(&re, RelocEntry::kTypeAbsToRel, 4);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+ writer.emit8(x86EncodeMod(0, opReg, 5));
+ writer.emit32uLE(0);
+
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_leadingSize = uint8_t(writer.offsetFrom(_bufferPtr) - 4);
+ re->_trailingSize = uint8_t(immSize);
+ re->_payload = uint64_t(rmRel->as<Mem>().offset());
+
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+ }
+ else {
+ uint64_t rip64 = baseAddress + _section->offset() + virtualOffset;
+ uint64_t rel64 = uint64_t(rmRel->as<Mem>().offset()) - rip64;
+
+ if (Support::isInt32(int64_t(rel64))) {
+ writer.emit8(x86EncodeMod(0, opReg, 5));
+ writer.emit32uLE(uint32_t(rel64 & 0xFFFFFFFFu));
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+ }
+ else {
+ // We must check the original address type as we have modified
+ // `addrType`. We failed if the original address type is 'rel'.
+ if (ASMJIT_UNLIKELY(rmRel->as<Mem>().isRel()))
+ goto InvalidAddress;
+ }
+ }
+ }
+
+ // Handle unsigned 32-bit address that doesn't work with sign extension.
+ // Consider the following instructions:
+ //
+ // 1. lea rax, [-1] - Sign extended to 0xFFFFFFFFFFFFFFFF
+ // 2. lea rax, [0xFFFFFFFF] - Zero extended to 0x00000000FFFFFFFF
+ // 3. add rax, [-1] - Sign extended to 0xFFFFFFFFFFFFFFFF
+ // 4. add rax, [0xFFFFFFFF] - Zero extended to 0x00000000FFFFFFFF
+ //
+ // Sign extension is naturally performed by the CPU so we don't have to
+ // bother, however, zero extension requires address-size override prefix,
+ // which we probably don't have at this moment. So to make the address
+ // valid we need to insert it at `memOpAOMark` if it's not already there.
+ //
+ // If this is 'lea' instruction then it's possible to remove REX.W part
+ // from REX prefix (if it's there), which would be one-byte shorter than
+ // inserting address-size override.
+ //
+ // NOTE: If we don't do this then these instructions are unencodable.
+ if (!isOffsetI32) {
+ // 64-bit absolute address is unencodable.
+ if (ASMJIT_UNLIKELY(!isOffsetU32))
+ goto InvalidAddress64Bit;
+
+ // We only patch the existing code if we don't have address-size override.
+ if (*memOpAOMark != 0x67) {
+ if (instId == Inst::kIdLea) {
+ // LEA: Remove REX.W, if present. This is easy as we know that 'lea'
+ // doesn't use any PP prefix so if REX prefix was emitted it would be
+ // at `memOpAOMark`.
+ uint32_t rex = *memOpAOMark;
+ if (rex & kX86ByteRex) {
+ rex &= (~kX86ByteRexW) & 0xFF;
+ *memOpAOMark = uint8_t(rex);
+
+ // We can remove the REX prefix completely if it was not forced.
+ if (rex == kX86ByteRex && !(options & Inst::kOptionRex))
+ writer.remove8(memOpAOMark);
+ }
+ }
+ else {
+ // Any other instruction: Insert address-size override prefix.
+ writer.insert8(memOpAOMark, 0x67);
+ }
+ }
+ }
+
+ // Emit 32-bit absolute address.
+ writer.emit8(x86EncodeMod(0, opReg, 4));
+ writer.emit8(x86EncodeSib(0, 4, 5));
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ }
+ // ==========|> [LABEL|RIP + DISP32]
+ else {
+ writer.emit8(x86EncodeMod(0, opReg, 5));
+
+ if (is32Bit()) {
+EmitModSib_LabelRip_X86:
+ if (ASMJIT_UNLIKELY(_code->_relocations.willGrow(_code->allocator()) != kErrorOk))
+ goto OutOfMemory;
+
+ relOffset = rmRel->as<Mem>().offsetLo32();
+ if (rmInfo & kX86MemInfo_BaseLabel) {
+ // [LABEL->ABS].
+ label = _code->labelEntry(rmRel->as<Mem>().baseId());
+ if (ASMJIT_UNLIKELY(!label))
+ goto InvalidLabel;
+
+ err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, 4);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+ re->_sourceSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_leadingSize = uint8_t(writer.offsetFrom(_bufferPtr));
+ re->_trailingSize = uint8_t(immSize);
+ re->_payload = uint64_t(int64_t(relOffset));
+
+ if (label->isBound()) {
+ // Label bound to the current section.
+ re->_payload += label->offset();
+ re->_targetSectionId = label->section()->id();
+ writer.emit32uLE(0);
+ }
+ else {
+ // Non-bound label or label bound to a different section.
+ relOffset = -4 - immSize;
+ relSize = 4;
+ goto EmitRel;
+ }
+ }
+ else {
+ // [RIP->ABS].
+ err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs, 4);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+ re->_sourceSectionId = _section->id();
+ re->_targetSectionId = _section->id();
+ re->_sourceOffset = offset();
+ re->_leadingSize = uint8_t(writer.offsetFrom(_bufferPtr));
+ re->_trailingSize = uint8_t(immSize);
+ re->_payload = re->_sourceOffset + re->_leadingSize + 4 + re->_trailingSize + uint64_t(int64_t(relOffset));
+
+ writer.emit32uLE(0);
+ }
+ }
+ else {
+ relOffset = rmRel->as<Mem>().offsetLo32();
+ if (rmInfo & kX86MemInfo_BaseLabel) {
+ // [RIP].
+ label = _code->labelEntry(rmRel->as<Mem>().baseId());
+ if (ASMJIT_UNLIKELY(!label))
+ goto InvalidLabel;
+
+ relOffset -= (4 + immSize);
+ if (label->isBoundTo(_section)) {
+ // Label bound to the current section.
+ relOffset += int32_t(label->offset() - writer.offsetFrom(_bufferData));
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ else {
+ // Non-bound label or label bound to a different section.
+ relSize = 4;
+ goto EmitRel;
+ }
+ }
+ else {
+ // [RIP].
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ }
+ }
+ }
+ else if (!(rmInfo & kX86MemInfo_67H_X86)) {
+ // ESP|RSP can't be used as INDEX in pure SIB mode, however, VSIB mode
+ // allows XMM4|YMM4|ZMM4 (that's why the check is before the label).
+ if (ASMJIT_UNLIKELY(rxReg == Gp::kIdSp))
+ goto InvalidAddressIndex;
+
+EmitModVSib:
+ rxReg &= 0x7;
+
+ // ==========|> [BASE + INDEX + DISP8|DISP32].
+ if (rmInfo & kX86MemInfo_BaseGp) {
+ rbReg &= 0x7;
+ relOffset = rmRel->as<Mem>().offsetLo32();
+
+ uint32_t mod = x86EncodeMod(0, opReg, 4);
+ uint32_t sib = x86EncodeSib(rmRel->as<Mem>().shift(), rxReg, rbReg);
+
+ if (relOffset == 0 && rbReg != Gp::kIdBp) {
+ // [BASE + INDEX << SHIFT].
+ writer.emit8(mod);
+ writer.emit8(sib);
+ }
+ else {
+ uint32_t cdShift = (opcode & Opcode::kCDSHL_Mask) >> Opcode::kCDSHL_Shift;
+ int32_t cdOffset = relOffset >> cdShift;
+
+ if (Support::isInt8(cdOffset) && relOffset == int32_t(uint32_t(cdOffset) << cdShift)) {
+ // [BASE + INDEX << SHIFT + DISP8].
+ writer.emit8(mod + 0x40); // <- MOD(1, opReg, 4).
+ writer.emit8(sib);
+ writer.emit8(uint32_t(cdOffset));
+ }
+ else {
+ // [BASE + INDEX << SHIFT + DISP32].
+ writer.emit8(mod + 0x80); // <- MOD(2, opReg, 4).
+ writer.emit8(sib);
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ }
+ }
+ // ==========|> [INDEX + DISP32].
+ else if (!(rmInfo & (kX86MemInfo_BaseLabel | kX86MemInfo_BaseRip))) {
+ // [INDEX << SHIFT + DISP32].
+ writer.emit8(x86EncodeMod(0, opReg, 4));
+ writer.emit8(x86EncodeSib(rmRel->as<Mem>().shift(), rxReg, 5));
+
+ relOffset = rmRel->as<Mem>().offsetLo32();
+ writer.emit32uLE(uint32_t(relOffset));
+ }
+ // ==========|> [LABEL|RIP + INDEX + DISP32].
+ else {
+ if (is32Bit()) {
+ writer.emit8(x86EncodeMod(0, opReg, 4));
+ writer.emit8(x86EncodeSib(rmRel->as<Mem>().shift(), rxReg, 5));
+ goto EmitModSib_LabelRip_X86;
+ }
+ else {
+ // NOTE: This also handles VSIB+RIP, which is not allowed in 64-bit mode.
+ goto InvalidAddress;
+ }
+ }
+ }
+ else {
+ // 16-bit address mode (32-bit mode with 67 override prefix).
+ relOffset = (int32_t(rmRel->as<Mem>().offsetLo32()) << 16) >> 16;
+
+ // NOTE: 16-bit addresses don't use a SIB byte and their encoding differs. We
+ // use a table-based approach to calculate the proper MOD byte as it's easier.
+ // Also, not all BASE [+ INDEX] combinations are supported in 16-bit mode, so
+ // this may fail.
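+ //
+ // For reference, the 16-bit R/M field selects one of [BX+SI], [BX+DI],
+ // [BP+SI], [BP+DI], [SI], [DI], [BP] (or DISP16 when MOD=0), and [BX] -
+ // this is what x86Mod16BaseIndexTable and x86Mod16BaseTable encode.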
+ const uint32_t kBaseGpIdx = (kX86MemInfo_BaseGp | kX86MemInfo_Index);
+
+ if (rmInfo & kBaseGpIdx) {
+ // ==========|> [BASE + INDEX + DISP16].
+ uint32_t mod;
+
+ rbReg &= 0x7;
+ rxReg &= 0x7;
+
+ if ((rmInfo & kBaseGpIdx) == kBaseGpIdx) {
+ uint32_t shf = rmRel->as<Mem>().shift();
+ if (ASMJIT_UNLIKELY(shf != 0))
+ goto InvalidAddress;
+ mod = x86Mod16BaseIndexTable[(rbReg << 3) + rxReg];
+ }
+ else {
+ if (rmInfo & kX86MemInfo_Index)
+ rbReg = rxReg;
+ mod = x86Mod16BaseTable[rbReg];
+ }
+
+ if (ASMJIT_UNLIKELY(mod == 0xFF))
+ goto InvalidAddress;
+
+ mod += opReg << 3;
+ if (relOffset == 0 && mod != 0x06) {
+ writer.emit8(mod);
+ }
+ else if (Support::isInt8(relOffset)) {
+ writer.emit8(mod + 0x40);
+ writer.emit8(uint32_t(relOffset));
+ }
+ else {
+ writer.emit8(mod + 0x80);
+ writer.emit16uLE(uint32_t(relOffset));
+ }
+ }
+ else {
+ // Not supported in 16-bit addresses.
+ if (rmInfo & (kX86MemInfo_BaseRip | kX86MemInfo_BaseLabel))
+ goto InvalidAddress;
+
+ // ==========|> [DISP16].
+ writer.emit8(opReg | 0x06);
+ writer.emit16uLE(uint32_t(relOffset));
+ }
+ }
+
+ writer.emitImmediate(uint64_t(immValue), immSize);
+ goto EmitDone;
+
+ // --------------------------------------------------------------------------
+ // [Emit - FPU]
+ // --------------------------------------------------------------------------
+
+EmitFpuOp:
+ // Mandatory instruction prefix.
+ writer.emitPP(opcode.v);
+
+ // FPU instructions consist of two opcodes.
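+ // For example, 'fadd st0, st1' is encoded as D8 C1 - both bytes are packed
+ // into `opcode` and split apart here, with kFPU_2B_Shift holding the first
+ // byte's position.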
+ writer.emit8(opcode.v >> Opcode::kFPU_2B_Shift);
+ writer.emit8(opcode.v);
+ goto EmitDone;
+
+ // --------------------------------------------------------------------------
+ // [Emit - VEX / EVEX]
+ // --------------------------------------------------------------------------
+
+EmitVexEvexOp:
+ {
+ // These don't use immediate.
+ ASMJIT_ASSERT(immSize == 0);
+
+ // Only the 'vzeroall' and 'vzeroupper' instructions use this encoding; they
+ // don't define 'W' to be '1' so we can just check the 'mmmmm' field. Both
+ // instructions can be encoded with a VEX2 prefix, so VEX3 is only used when
+ // explicitly specified as an instruction option.
+ ASMJIT_ASSERT((opcode & Opcode::kW) == 0);
+
+ uint32_t x = ((opcode & Opcode::kMM_Mask ) >> (Opcode::kMM_Shift )) |
+ ((opcode & Opcode::kLL_Mask ) >> (Opcode::kLL_Shift - 10)) |
+ ((opcode & Opcode::kPP_VEXMask ) >> (Opcode::kPP_Shift - 8)) |
+ ((options & Inst::kOptionVex3 ) >> (Opcode::kMM_Shift )) ;
+ if (x & 0x04u) {
+ x = (x & (0x4 ^ 0xFFFF)) << 8; // [00000000|00000Lpp|0000m0mm|00000000].
+ x ^= (kX86ByteVex3) | // [........|00000Lpp|0000m0mm|__VEX3__].
+ (0x07u << 13) | // [........|00000Lpp|1110m0mm|__VEX3__].
+ (0x0Fu << 19) | // [........|01111Lpp|1110m0mm|__VEX3__].
+ (opcode << 24) ; // [_OPCODE_|01111Lpp|1110m0mm|__VEX3__].
+
+ writer.emit32uLE(x);
+ goto EmitDone;
+ }
+ else {
+ x = ((x >> 8) ^ x) ^ 0xF9;
+ writer.emit8(kX86ByteVex2);
+ writer.emit8(x);
+ writer.emit8(opcode.v);
+ goto EmitDone;
+ }
+ }
+
+EmitVexEvexR:
+ {
+ // Construct `x` - a complete EVEX|VEX prefix.
+ uint32_t x = ((opReg << 4) & 0xF980u) | // [........|........|Vvvvv..R|R.......].
+ ((rbReg << 2) & 0x0060u) | // [........|........|........|.BB.....].
+ (opcode.extractLLMM(options)) | // [........|.LL.....|Vvvvv..R|RBBmmmmm].
+ (_extraReg.id() << 16); // [........|.LL..aaa|Vvvvv..R|RBBmmmmm].
+ opReg &= 0x7;
+
+ // Handle AVX512 options by a single branch.
+ const uint32_t kAvx512Options = Inst::kOptionZMask | Inst::kOptionER | Inst::kOptionSAE;
+ if (options & kAvx512Options) {
+ uint32_t kBcstMask = 0x1 << 20;
+ uint32_t kLLMask10 = 0x2 << 21;
+ uint32_t kLLMask11 = 0x3 << 21;
+
+ // Designed to be easily encodable so the position must be exact.
+ // The {rz-sae} is encoded as {11}, so it should match the mask.
+ ASMJIT_ASSERT(Inst::kOptionRZ_SAE == kLLMask11);
+
+ x |= options & Inst::kOptionZMask; // [........|zLLb.aaa|Vvvvv..R|RBBmmmmm].
+
+ // Support embedded-rounding {er} and suppress-all-exceptions {sae}.
+ if (options & (Inst::kOptionER | Inst::kOptionSAE)) {
+ // Embedded rounding is only encodable if the instruction is either
+ // scalar or a 512-bit operation, as the {er} rounding predicate
+ // collides with the LL part of the instruction.
+ if ((x & kLLMask11) != kLLMask10) {
+ // Ok, so LL is not 10, thus the instruction must be scalar. Scalar
+ // instructions don't support broadcast, so if this instruction did
+ // support broadcast then neither {er} nor {sae} would be encodable.
+ if (ASMJIT_UNLIKELY(commonInfo->hasAvx512B()))
+ goto InvalidEROrSAE;
+ }
+
+ if (options & Inst::kOptionER) {
+ if (ASMJIT_UNLIKELY(!commonInfo->hasAvx512ER()))
+ goto InvalidEROrSAE;
+
+ x &= ~kLLMask11; // [........|.00..aaa|Vvvvv..R|RBBmmmmm].
+ x |= kBcstMask | (options & kLLMask11); // [........|.LLb.aaa|Vvvvv..R|RBBmmmmm].
+ }
+ else {
+ if (ASMJIT_UNLIKELY(!commonInfo->hasAvx512SAE()))
+ goto InvalidEROrSAE;
+
+ x |= kBcstMask; // [........|.LLb.aaa|Vvvvv..R|RBBmmmmm].
+ }
+ }
+ }
+
+ // Check if EVEX is required by checking bits in `x` : [........|xx.x.xxx|x......x|.x.x....].
+ if (x & 0x00D78150u) {
+ uint32_t y = ((x << 4) & 0x00080000u) | // [........|...bV...|........|........].
+ ((x >> 4) & 0x00000010u) ; // [........|...bV...|........|...R....].
+ x = (x & 0x00FF78E3u) | y; // [........|zLLbVaaa|0vvvv000|RBBR00mm].
+ x = x << 8; // [zLLbVaaa|0vvvv000|RBBR00mm|00000000].
+ x |= (opcode >> kVSHR_W ) & 0x00800000u; // [zLLbVaaa|Wvvvv000|RBBR00mm|00000000].
+ x |= (opcode >> kVSHR_PP_EW) & 0x00830000u; // [zLLbVaaa|Wvvvv0pp|RBBR00mm|00000000] (added PP and EVEX.W).
+ // _ ____ ____
+ x ^= 0x087CF000u | kX86ByteEvex; // [zLLbVaaa|Wvvvv1pp|RBBR00mm|01100010].
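+ // The result is the complete 4-byte EVEX prefix: 62, P0=[RXBR'00mm],
+ // P1=[Wvvvv1pp], P2=[zL'LbV'aaa] - R/X/B/R', vvvv, and V' are stored
+ // inverted; the opcode byte follows.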
+
+ writer.emit32uLE(x);
+ writer.emit8(opcode.v);
+
+ rbReg &= 0x7;
+ writer.emit8(x86EncodeMod(3, opReg, rbReg));
+ writer.emitImmByteOrDWord(immValue, immSize);
+ goto EmitDone;
+ }
+
+ // Not EVEX, prepare `x` for VEX2 or VEX3: x = [........|00L00000|0vvvv000|R0B0mmmm].
+ x |= ((opcode >> (kVSHR_W + 8)) & 0x8000u) | // [00000000|00L00000|Wvvvv000|R0B0mmmm].
+ ((opcode >> (kVSHR_PP + 8)) & 0x0300u) | // [00000000|00L00000|0vvvv0pp|R0B0mmmm].
+ ((x >> 11 ) & 0x0400u) ; // [00000000|00L00000|WvvvvLpp|R0B0mmmm].
+
+ // Check if VEX3 is required / forced: [........|........|x.......|..x..x..].
+ if (x & 0x0008024u) {
+ uint32_t xorMsk = x86VEXPrefix[x & 0xF] | (opcode << 24);
+
+ // Clear 'FORCE-VEX3' bit and all high bits.
+ x = (x & (0x4 ^ 0xFFFF)) << 8; // [00000000|WvvvvLpp|R0B0m0mm|00000000].
+ // ____ _ _
+ x ^= xorMsk; // [_OPCODE_|WvvvvLpp|R1Bmmmmm|VEX3|XOP].
+ writer.emit32uLE(x);
+
+ rbReg &= 0x7;
+ writer.emit8(x86EncodeMod(3, opReg, rbReg));
+ writer.emitImmByteOrDWord(immValue, immSize);
+ goto EmitDone;
+ }
+ else {
+ // 'mmmmm' must be '00001'.
+ ASMJIT_ASSERT((x & 0x1F) == 0x01);
+
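+ // VEX2 is C5 followed by a single byte laid out as [RvvvvLpp], with R and
+ // vvvv stored inverted; the XOR below folds the assembled fields together
+ // and applies that inversion in one step.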
+ x = ((x >> 8) ^ x) ^ 0xF9;
+ writer.emit8(kX86ByteVex2);
+ writer.emit8(x);
+ writer.emit8(opcode.v);
+
+ rbReg &= 0x7;
+ writer.emit8(x86EncodeMod(3, opReg, rbReg));
+ writer.emitImmByteOrDWord(immValue, immSize);
+ goto EmitDone;
+ }
+ }
+
+EmitVexEvexM:
+ ASMJIT_ASSERT(rmRel != nullptr);
+ ASMJIT_ASSERT(rmRel->opType() == Operand::kOpMem);
+
+ rmInfo = x86MemInfo[rmRel->as<Mem>().baseAndIndexTypes()];
+ writer.emitSegmentOverride(rmRel->as<Mem>().segmentId());
+
+ memOpAOMark = writer.cursor();
+ writer.emitAddressOverride((rmInfo & _addressOverrideMask()) != 0);
+
+ rbReg = rmRel->as<Mem>().hasBaseReg() ? rmRel->as<Mem>().baseId() : uint32_t(0);
+ rxReg = rmRel->as<Mem>().hasIndexReg() ? rmRel->as<Mem>().indexId() : uint32_t(0);
+
+ {
+ uint32_t broadcastBit = uint32_t(rmRel->as<Mem>().hasBroadcast());
+
+ // Construct `x` - a complete EVEX|VEX prefix.
+ uint32_t x = ((opReg << 4) & 0x0000F980u) | // [........|........|Vvvvv..R|R.......].
+ ((rxReg << 3) & 0x00000040u) | // [........|........|........|.X......].
+ ((rxReg << 15) & 0x00080000u) | // [........|....X...|........|........].
+ ((rbReg << 2) & 0x00000020u) | // [........|........|........|..B.....].
+ opcode.extractLLMM(options) | // [........|.LL.X...|Vvvvv..R|RXBmmmmm].
+ (_extraReg.id() << 16) | // [........|.LL.Xaaa|Vvvvv..R|RXBmmmmm].
+ (broadcastBit << 20) ; // [........|.LLbXaaa|Vvvvv..R|RXBmmmmm].
+ opReg &= 0x07u;
+
+ // Mark invalid VEX (force EVEX) case: // [@.......|.LLbXaaa|Vvvvv..R|RXBmmmmm].
+ x |= (~commonInfo->flags() & InstDB::kFlagVex) << (31 - Support::constCtz(InstDB::kFlagVex));
+
+ // Handle AVX512 options by a single branch.
+ const uint32_t kAvx512Options = Inst::kOptionZMask |
+ Inst::kOptionER |
+ Inst::kOptionSAE ;
+ if (options & kAvx512Options) {
+ // {er} and {sae} are both invalid if memory operand is used.
+ if (ASMJIT_UNLIKELY(options & (Inst::kOptionER | Inst::kOptionSAE)))
+ goto InvalidEROrSAE;
+
+ x |= options & (Inst::kOptionZMask); // [@.......|zLLbXaaa|Vvvvv..R|RXBmmmmm].
+ }
+
+ // Check if EVEX is required by checking bits in `x` : [@.......|xx.xxxxx|x......x|...x....].
+ if (x & 0x80DF8110u) {
+ uint32_t y = ((x << 4) & 0x00080000u) | // [@.......|....V...|........|........].
+ ((x >> 4) & 0x00000010u) ; // [@.......|....V...|........|...R....].
+ x = (x & 0x00FF78E3u) | y; // [........|zLLbVaaa|0vvvv000|RXBR00mm].
+ x = x << 8; // [zLLbVaaa|0vvvv000|RBBR00mm|00000000].
+ x |= (opcode >> kVSHR_W ) & 0x00800000u; // [zLLbVaaa|Wvvvv000|RBBR00mm|00000000].
+ x |= (opcode >> kVSHR_PP_EW) & 0x00830000u; // [zLLbVaaa|Wvvvv0pp|RBBR00mm|00000000] (added PP and EVEX.W).
+ // _ ____ ____
+ x ^= 0x087CF000u | kX86ByteEvex; // [zLLbVaaa|Wvvvv1pp|RBBR00mm|01100010].
+
+ writer.emit32uLE(x);
+ writer.emit8(opcode.v);
+
+ if (x & 0x10000000u) {
+ // Broadcast: change the compressed displacement scale to either x4
+ // (SHL 2) or x8 (SHL 3) depending on the instruction's W. If 'W' is 1,
+ // 'SHL' must be 3, otherwise it must be 2.
+ opcode &= ~uint32_t(Opcode::kCDSHL_Mask);
+ opcode |= ((x & 0x00800000u) ? 3u : 2u) << Opcode::kCDSHL_Shift;
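+
+ // With a broadcast the memory access is a single element, so e.g.
+ // 'vaddps zmm0, zmm1, [rax+256]{1to16}' (W=0, 4-byte elements) stores the
+ // displacement as DISP8 = 256 >> 2 = 64.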
+ }
+ else {
+ // Add the compressed displacement 'SHF' to the opcode based on 'TTWLL'.
+ // The index to `x86CDisp8SHL` is composed as `CDTT[4:3] | W[2] | LL[1:0]`.
+ uint32_t TTWLL = ((opcode >> (Opcode::kCDTT_Shift - 3)) & 0x18) +
+ ((opcode >> (Opcode::kW_Shift - 2)) & 0x04) +
+ ((x >> 29) & 0x3);
+ opcode += x86CDisp8SHL[TTWLL];
+ }
+ }
+ else {
+ // Not EVEX, prepare `x` for VEX2 or VEX3: x = [........|00L00000|0vvvv000|RXB0mmmm].
+ x |= ((opcode >> (kVSHR_W + 8)) & 0x8000u) | // [00000000|00L00000|Wvvvv000|RXB0mmmm].
+ ((opcode >> (kVSHR_PP + 8)) & 0x0300u) | // [00000000|00L00000|Wvvvv0pp|RXB0mmmm].
+ ((x >> 11 ) & 0x0400u) ; // [00000000|00L00000|WvvvvLpp|RXB0mmmm].
+
+ // Clear a possible CDisp specified by EVEX.
+ opcode &= ~Opcode::kCDSHL_Mask;
+
+ // Check if VEX3 is required / forced: [........|........|x.......|.xx..x..].
+ if (x & 0x0008064u) {
+ uint32_t xorMsk = x86VEXPrefix[x & 0xF] | (opcode << 24);
+
+ // Clear 'FORCE-VEX3' bit and all high bits.
+ x = (x & (0x4 ^ 0xFFFF)) << 8; // [00000000|WvvvvLpp|RXB0m0mm|00000000].
+ // ____ ___
+ x ^= xorMsk; // [_OPCODE_|WvvvvLpp|RXBmmmmm|VEX3_XOP].
+ writer.emit32uLE(x);
+ }
+ else {
+ // 'mmmmm' must be '00001'.
+ ASMJIT_ASSERT((x & 0x1F) == 0x01);
+
+ x = ((x >> 8) ^ x) ^ 0xF9;
+ writer.emit8(kX86ByteVex2);
+ writer.emit8(x);
+ writer.emit8(opcode.v);
+ }
+ }
+ }
+
+ // MOD|SIB address.
+ if (!commonInfo->hasFlag(InstDB::kFlagVsib))
+ goto EmitModSib;
+
+ // MOD|VSIB address without INDEX is invalid.
+ if (rmInfo & kX86MemInfo_Index)
+ goto EmitModVSib;
+ goto InvalidInstruction;
+
+ // --------------------------------------------------------------------------
+ // [Emit - Jmp/Jcc/Call]
+ // --------------------------------------------------------------------------
+
+EmitJmpCall:
+ {
+ // Emit REX prefix if asked for (64-bit only).
+ uint32_t rex = opcode.extractRex(options);
+ if (ASMJIT_UNLIKELY(x86IsRexInvalid(rex)))
+ goto InvalidRexPrefix;
+ rex &= ~kX86ByteInvalidRex & 0xFF;
+ writer.emit8If(rex | kX86ByteRex, rex != 0);
+
+ uint64_t ip = uint64_t(writer.offsetFrom(_bufferData));
+ uint32_t rel32 = 0;
+ uint32_t opCode8 = x86AltOpcodeOf(instInfo);
+
+ uint32_t inst8Size = 1 + 1; // OPCODE + REL8 .
+ uint32_t inst32Size = 1 + 4; // [PREFIX] OPCODE + REL32.
+
+ // Jcc instructions with 32-bit displacement use 0x0F prefix,
+ // other instructions don't. No other prefixes are used by X86.
+ ASMJIT_ASSERT((opCode8 & Opcode::kMM_Mask) == 0);
+ ASMJIT_ASSERT((opcode & Opcode::kMM_Mask) == 0 ||
+ (opcode & Opcode::kMM_Mask) == Opcode::kMM_0F);
+
+ // Only one of these should be used at the same time.
+ inst32Size += uint32_t(opReg != 0);
+ inst32Size += uint32_t((opcode & Opcode::kMM_Mask) == Opcode::kMM_0F);
+
+ if (rmRel->isLabel()) {
+ label = _code->labelEntry(rmRel->as<Label>());
+ if (ASMJIT_UNLIKELY(!label))
+ goto InvalidLabel;
+
+ if (label->isBoundTo(_section)) {
+ // Label bound to the current section.
+ rel32 = uint32_t((label->offset() - ip - inst32Size) & 0xFFFFFFFFu);
+ goto EmitJmpCallRel;
+ }
+ else {
+ // Non-bound label or label bound to a different section.
+ if (opCode8 && (!opcode.v || (options & Inst::kOptionShortForm))) {
+ writer.emit8(opCode8);
+
+ // Record DISP8 (non-bound label).
+ relOffset = -1;
+ relSize = 1;
+ goto EmitRel;
+ }
+ else {
+ // Also refuse the 'short' form, if it was explicitly specified.
+ if (ASMJIT_UNLIKELY(!opcode.v || (options & Inst::kOptionShortForm) != 0))
+ goto InvalidDisplacement;
+
+ writer.emit8If(0x0F, (opcode & Opcode::kMM_Mask) != 0);// Emit 0F prefix.
+ writer.emit8(opcode.v); // Emit opcode.
+ writer.emit8If(x86EncodeMod(3, opReg, 0), opReg != 0); // Emit MOD.
+
+ // Record DISP32 (non-bound label).
+ relOffset = -4;
+ relSize = 4;
+ goto EmitRel;
+ }
+ }
+ }
+
+ if (rmRel->isImm()) {
+ uint64_t baseAddress = codeInfo().baseAddress();
+ uint64_t jumpAddress = rmRel->as<Imm>().u64();
+
+ // If the base-address is known calculate a relative displacement and
+ // check if it fits in 32 bits (which is always true in 32-bit mode).
+ // Emit relative displacement as it was a bound label if all checks are ok.
+ if (baseAddress != Globals::kNoBaseAddress) {
+ uint64_t rel64 = jumpAddress - (ip + baseAddress) - inst32Size;
+ if (archId() == ArchInfo::kIdX86 || Support::isInt32(int64_t(rel64))) {
+ rel32 = uint32_t(rel64 & 0xFFFFFFFFu);
+ goto EmitJmpCallRel;
+ }
+ else {
+ // Relative displacement exceeds 32-bits - relocator can only
+ // insert trampoline for jmp/call, but not for jcc/jecxz.
+ if (ASMJIT_UNLIKELY(!x86IsJmpOrCall(instId)))
+ goto InvalidDisplacement;
+ }
+ }
+
+ err = _code->newRelocEntry(&re, RelocEntry::kTypeAbsToRel, 0);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+ re->_sourceOffset = offset();
+ re->_sourceSectionId = _section->id();
+ re->_payload = jumpAddress;
+
+ if (ASMJIT_LIKELY(opcode.v)) {
+ // 64-bit: Emit a REX prefix so the instruction can be patched later.
+ // The REX prefix does nothing if not patched, but allows patching the
+ // instruction to use ModR/M and point to the memory where the final
+ // 64-bit address is stored.
+ if (archId() != ArchInfo::kIdX86 && x86IsJmpOrCall(instId)) {
+ if (!rex)
+ writer.emit8(kX86ByteRex);
+
+ err = _code->addAddressToAddressTable(jumpAddress);
+ if (ASMJIT_UNLIKELY(err))
+ goto Failed;
+
+ re->_relocType = RelocEntry::kTypeX64AddressEntry;
+ }
+
+ writer.emit8If(0x0F, (opcode & Opcode::kMM_Mask) != 0); // Emit 0F prefix.
+ writer.emit8(opcode.v); // Emit opcode.
+ writer.emit8If(x86EncodeMod(3, opReg, 0), opReg != 0); // Emit MOD.
+ writer.emit32uLE(0); // Emit DISP32.
+
+ re->_valueSize = 4;
+ re->_leadingSize = uint8_t(writer.offsetFrom(_bufferPtr) - 4);
+ re->_trailingSize = uint8_t(immSize);
+ }
+ else {
+ writer.emit8(opCode8); // Emit opcode.
+ writer.emit8(0); // Emit DISP8 (zero).
+
+ re->_valueSize = 1;
+ re->_leadingSize = uint8_t(writer.offsetFrom(_bufferPtr) - 1);
+ re->_trailingSize = uint8_t(immSize);
+ }
+ goto EmitDone;
+ }
+
+ // Not Label|Imm -> Invalid.
+ goto InvalidInstruction;
+
+  // Emit jmp/call with a relative displacement known at assembly-time. Decide
+  // between the 8-bit and 32-bit displacement encoding. Some instructions
+  // allow only one of the two encodings; others allow both.
+EmitJmpCallRel:
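+  // `rel32` was computed for the 32-bit encoding; adding `inst32Size -
+  // inst8Size` rebases the displacement to the shorter instruction before
+  // the 8-bit range check below.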
+ if (Support::isInt8(int32_t(rel32 + inst32Size - inst8Size)) && opCode8 && !(options & Inst::kOptionLongForm)) {
+ options |= Inst::kOptionShortForm;
+    writer.emit8(opCode8); // Emit opcode.
+    writer.emit8(rel32 + inst32Size - inst8Size); // Emit DISP8.
+ goto EmitDone;
+ }
+ else {
+ if (ASMJIT_UNLIKELY(!opcode.v || (options & Inst::kOptionShortForm) != 0))
+ goto InvalidDisplacement;
+
+ options &= ~Inst::kOptionShortForm;
+ writer.emit8If(0x0F, (opcode & Opcode::kMM_Mask) != 0); // Emit 0x0F prefix.
+    writer.emit8(opcode.v); // Emit opcode.
+ writer.emit8If(x86EncodeMod(3, opReg, 0), opReg != 0); // Emit MOD.
+ writer.emit32uLE(rel32); // Emit DISP32.
+ goto EmitDone;
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [Emit - Relative]
+ // --------------------------------------------------------------------------
+
+EmitRel:
+ {
+ ASMJIT_ASSERT(relSize == 1 || relSize == 4);
+
+ // Chain with label.
+ size_t offset = size_t(writer.offsetFrom(_bufferData));
+ LabelLink* link = _code->newLabelLink(label, _section->id(), offset, relOffset);
+
+ if (ASMJIT_UNLIKELY(!link))
+ goto OutOfMemory;
+
+ if (re)
+ link->relocId = re->id();
+
+    // Emit dummy displacement bytes (their value mirrors the displacement
+    // size); they are patched once the label is bound.
+ if (relSize == 1)
+ writer.emit8(0x01);
+ else // if (relSize == 4)
+ writer.emit32uLE(0x04040404);
+ }
+ writer.emitImmediate(uint64_t(immValue), immSize);
+
+ // --------------------------------------------------------------------------
+ // [Done]
+ // --------------------------------------------------------------------------
+
+EmitDone:
+ if (ASMJIT_UNLIKELY(options & Inst::kOptionReserved)) {
+#ifndef ASMJIT_NO_LOGGING
+ if (hasEmitterOption(kOptionLoggingEnabled))
+ _emitLog(instId, options, o0, o1, o2, o3, relSize, immSize, writer.cursor());
+#endif
+ }
+
+ resetInstOptions();
+ resetExtraReg();
+ resetInlineComment();
+
+ writer.done(this);
+ return kErrorOk;
+
+ // --------------------------------------------------------------------------
+ // [Error Cases]
+ // --------------------------------------------------------------------------
+
+ #define ERROR_HANDLER(ERROR) \
+ ERROR: \
+ err = DebugUtils::errored(kError##ERROR); \
+ goto Failed;
+
+ ERROR_HANDLER(OutOfMemory)
+ ERROR_HANDLER(InvalidLabel)
+ ERROR_HANDLER(InvalidInstruction)
+ ERROR_HANDLER(InvalidLockPrefix)
+ ERROR_HANDLER(InvalidXAcquirePrefix)
+ ERROR_HANDLER(InvalidXReleasePrefix)
+ ERROR_HANDLER(InvalidRepPrefix)
+ ERROR_HANDLER(InvalidRexPrefix)
+ ERROR_HANDLER(InvalidEROrSAE)
+ ERROR_HANDLER(InvalidAddress)
+ ERROR_HANDLER(InvalidAddressIndex)
+ ERROR_HANDLER(InvalidAddress64Bit)
+ ERROR_HANDLER(InvalidDisplacement)
+ ERROR_HANDLER(InvalidSegment)
+ ERROR_HANDLER(InvalidImmediate)
+ ERROR_HANDLER(OperandSizeMismatch)
+ ERROR_HANDLER(AmbiguousOperandSize)
+ ERROR_HANDLER(NotConsecutiveRegs)
+
+ #undef ERROR_HANDLER
+
+Failed:
+ return _emitFailed(err, instId, options, o0, o1, o2, o3);
+}
+
+// ============================================================================
+// [asmjit::x86::Assembler - Align]
+// ============================================================================
+
+Error Assembler::align(uint32_t alignMode, uint32_t alignment) {
+ if (ASMJIT_UNLIKELY(alignMode >= kAlignCount))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ if (alignment <= 1)
+ return kErrorOk;
+
+ if (ASMJIT_UNLIKELY(!Support::isPowerOf2(alignment) || alignment > Globals::kMaxAlignment))
+ return reportError(DebugUtils::errored(kErrorInvalidArgument));
+
+ uint32_t i = uint32_t(Support::alignUpDiff<size_t>(offset(), alignment));
+ if (i > 0) {
+ CodeBufferWriter writer(this);
+ ASMJIT_PROPAGATE(writer.ensureSpace(this, i));
+
+ uint8_t pattern = 0x00;
+ switch (alignMode) {
+ case kAlignCode: {
+ if (hasEmitterOption(kOptionOptimizedAlign)) {
+ // Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 2B (NOP).
+ enum { kMaxNopSize = 9 };
+
+ static const uint8_t nopData[kMaxNopSize][kMaxNopSize] = {
+ { 0x90 },
+ { 0x66, 0x90 },
+ { 0x0F, 0x1F, 0x00 },
+ { 0x0F, 0x1F, 0x40, 0x00 },
+ { 0x0F, 0x1F, 0x44, 0x00, 0x00 },
+ { 0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00 },
+ { 0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00 },
+ { 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ { 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 }
+ };
+
+ do {
+ uint32_t n = Support::min<uint32_t>(i, kMaxNopSize);
+ const uint8_t* src = nopData[n - 1];
+
+ i -= n;
+ do {
+ writer.emit8(*src++);
+ } while (--n);
+ } while (i);
+ }
+
+ pattern = 0x90;
+ break;
+ }
+
+ case kAlignData:
+ pattern = 0xCC;
+ break;
+
+ case kAlignZero:
+ // Pattern already set to zero.
+ break;
+ }
+
+ while (i) {
+ writer.emit8(pattern);
+ i--;
+ }
+
+ writer.done(this);
+ }
+
+#ifndef ASMJIT_NO_LOGGING
+ if (hasEmitterOption(kOptionLoggingEnabled)) {
+ Logger* logger = _code->logger();
+ StringTmp<128> sb;
+ sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
+ sb.appendFormat("align %u\n", alignment);
+ logger->log(sb);
+ }
+#endif
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::x86::Assembler - Events]
+// ============================================================================
+
+Error Assembler::onAttach(CodeHolder* code) noexcept {
+ uint32_t archId = code->archId();
+ if (!ArchInfo::isX86Family(archId))
+ return DebugUtils::errored(kErrorInvalidArch);
+
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ if (archId == ArchInfo::kIdX86) {
+    // 32-bit architecture - X86.
+ _gpRegInfo.setSignature(Gpd::kSignature);
+ _globalInstOptions |= Inst::_kOptionInvalidRex;
+ _setAddressOverrideMask(kX86MemInfo_67H_X86);
+ }
+ else {
+    // 64-bit architecture - X64.
+ _gpRegInfo.setSignature(Gpq::kSignature);
+ _globalInstOptions &= ~Inst::_kOptionInvalidRex;
+ _setAddressOverrideMask(kX86MemInfo_67H_X64);
+ }
+
+ return kErrorOk;
+}
+
+Error Assembler::onDetach(CodeHolder* code) noexcept {
+ return Base::onDetach(code);
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86assembler.h b/3rdparty/asmjit/src/asmjit/x86/x86assembler.h
new file mode 100644
index 00000000000..3e3027bab4a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86assembler.h
@@ -0,0 +1,102 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86ASSEMBLER_H_INCLUDED
+#define ASMJIT_X86_X86ASSEMBLER_H_INCLUDED
+
+#include "../core/assembler.h"
+#include "../x86/x86emitter.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::Assembler]
+// ============================================================================
+
+//! Assembler (X86).
+//!
+//! Emits X86 machine code into buffers managed by `CodeHolder`.
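+//!
+//! A minimal sketch of the typical workflow (illustrative only; `JitRuntime`
+//! and `CodeHolder` come from the core headers):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! JitRuntime rt;                     // Runtime that owns executable memory.
+//! CodeHolder code;
+//! code.init(rt.codeInfo());          // Match the runtime's code layout.
+//!
+//! x86::Assembler a(&code);           // Attach the assembler to `code`.
+//! a.mov(x86::eax, 1);                // Instructions are encoded immediately.
+//! a.ret();
+//!
+//! int (*fn)(void);
+//! rt.add(&fn, &code);                // Relocate and copy to executable memory.
+//! ```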
+class ASMJIT_VIRTAPI Assembler
+ : public BaseAssembler,
+ public EmitterImplicitT<Assembler> {
+public:
+ ASMJIT_NONCOPYABLE(Assembler)
+ typedef BaseAssembler Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit Assembler(CodeHolder* code = nullptr) noexcept;
+ ASMJIT_API virtual ~Assembler() noexcept;
+
+ //! \}
+
+ //! \cond INTERNAL
+ //! \name Internal
+ //! \{
+
+  // NOTE: x86::Assembler uses `_privateData` to store the 'address-override'
+  // mask that decides whether to emit the address-override (67H) prefix based
+  // on the memory BASE+INDEX registers. It's either `kX86MemInfo_67H_X86` or
+  // `kX86MemInfo_67H_X64`.
+ inline uint32_t _addressOverrideMask() const noexcept { return _privateData; }
+ inline void _setAddressOverrideMask(uint32_t m) noexcept { _privateData = m; }
+
+ //! \}
+ //! \endcond
+
+ //! \cond INTERNAL
+ //! \name Emit
+ //! \{
+
+ using BaseEmitter::_emit;
+ ASMJIT_API Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) override;
+
+ //! \}
+ //! \endcond
+
+ //! \name Align
+ //! \{
+
+ ASMJIT_API Error align(uint32_t alignMode, uint32_t alignment) override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+ ASMJIT_API Error onDetach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86ASSEMBLER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86builder.cpp b/3rdparty/asmjit/src/asmjit/x86/x86builder.cpp
new file mode 100644
index 00000000000..4e65c7b16b5
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86builder.cpp
@@ -0,0 +1,69 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_NO_BUILDER)
+
+#include "../x86/x86assembler.h"
+#include "../x86/x86builder.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::Builder - Construction / Destruction]
+// ============================================================================
+
+Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
+ if (code)
+ code->attach(this);
+}
+Builder::~Builder() noexcept {}
+
+// ============================================================================
+// [asmjit::x86::Builder - Finalize]
+// ============================================================================
+
+Error Builder::finalize() {
+ ASMJIT_PROPAGATE(runPasses());
+ Assembler a(_code);
+ return serialize(&a);
+}
+
+// ============================================================================
+// [asmjit::x86::Builder - Events]
+// ============================================================================
+
+Error Builder::onAttach(CodeHolder* code) noexcept {
+ uint32_t archId = code->archId();
+ if (!ArchInfo::isX86Family(archId))
+ return DebugUtils::errored(kErrorInvalidArch);
+
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+
+ _gpRegInfo.setSignature(archId == ArchInfo::kIdX86 ? uint32_t(Gpd::kSignature) : uint32_t(Gpq::kSignature));
+ return kErrorOk;
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86 && !ASMJIT_NO_BUILDER
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86builder.h b/3rdparty/asmjit/src/asmjit/x86/x86builder.h
new file mode 100644
index 00000000000..66e2dfc99d8
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86builder.h
@@ -0,0 +1,79 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86BUILDER_H_INCLUDED
+#define ASMJIT_X86_X86BUILDER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_BUILDER
+
+#include "../core/builder.h"
+#include "../core/datatypes.h"
+#include "../x86/x86emitter.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::Builder]
+// ============================================================================
+
+//! Architecture-dependent asm-builder (X86).
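+//!
+//! Unlike `Assembler`, the builder records instructions as a list of nodes
+//! that can be iterated and modified before `finalize()` serializes them
+//! through a temporary `x86::Assembler`. A sketch (illustrative only):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! CodeHolder code;
+//! code.init(CodeInfo(ArchInfo::kIdHost));
+//!
+//! x86::Builder cb(&code);            // Appends nodes, encodes nothing yet.
+//! cb.mov(x86::eax, 42);
+//! cb.ret();
+//!
+//! cb.finalize();                     // Runs passes and serializes the nodes.
+//! ```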
+class ASMJIT_VIRTAPI Builder
+ : public BaseBuilder,
+ public EmitterImplicitT<Builder> {
+public:
+ ASMJIT_NONCOPYABLE(Builder)
+ typedef BaseBuilder Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit Builder(CodeHolder* code = nullptr) noexcept;
+ ASMJIT_API virtual ~Builder() noexcept;
+
+ //! \}
+
+ //! \name Finalize
+ //! \{
+
+ ASMJIT_API Error finalize() override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // !ASMJIT_NO_BUILDER
+#endif // ASMJIT_X86_X86BUILDER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86callconv.cpp b/3rdparty/asmjit/src/asmjit/x86/x86callconv.cpp
new file mode 100644
index 00000000000..7ec4c55ef0a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86callconv.cpp
@@ -0,0 +1,163 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../x86/x86callconv_p.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::CallConvInternal - Init]
+// ============================================================================
+
+static inline void CallConv_initX86Common(CallConv& cc) noexcept {
+ cc.setNaturalStackAlignment(4);
+ cc.setArchType(ArchInfo::kIdX86);
+ cc.setPreservedRegs(Reg::kGroupGp, Support::bitMask(Gp::kIdBx, Gp::kIdSp, Gp::kIdBp, Gp::kIdSi, Gp::kIdDi));
+}
+
+ASMJIT_FAVOR_SIZE Error CallConvInternal::init(CallConv& cc, uint32_t ccId) noexcept {
+ constexpr uint32_t kGroupGp = Reg::kGroupGp;
+ constexpr uint32_t kGroupVec = Reg::kGroupVec;
+ constexpr uint32_t kGroupMm = Reg::kGroupMm;
+ constexpr uint32_t kGroupKReg = Reg::kGroupKReg;
+
+ constexpr uint32_t kZax = Gp::kIdAx;
+ constexpr uint32_t kZbx = Gp::kIdBx;
+ constexpr uint32_t kZcx = Gp::kIdCx;
+ constexpr uint32_t kZdx = Gp::kIdDx;
+ constexpr uint32_t kZsp = Gp::kIdSp;
+ constexpr uint32_t kZbp = Gp::kIdBp;
+ constexpr uint32_t kZsi = Gp::kIdSi;
+ constexpr uint32_t kZdi = Gp::kIdDi;
+
+ switch (ccId) {
+ case CallConv::kIdX86StdCall:
+ cc.setFlags(CallConv::kFlagCalleePopsStack);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86MsThisCall:
+ cc.setFlags(CallConv::kFlagCalleePopsStack);
+ cc.setPassedOrder(kGroupGp, kZcx);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86MsFastCall:
+ case CallConv::kIdX86GccFastCall:
+ cc.setFlags(CallConv::kFlagCalleePopsStack);
+ cc.setPassedOrder(kGroupGp, kZcx, kZdx);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86GccRegParm1:
+ cc.setPassedOrder(kGroupGp, kZax);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86GccRegParm2:
+ cc.setPassedOrder(kGroupGp, kZax, kZdx);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86GccRegParm3:
+ cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx);
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86CDecl:
+ CallConv_initX86Common(cc);
+ break;
+
+ case CallConv::kIdX86Win64:
+ cc.setArchType(ArchInfo::kIdX64);
+ cc.setStrategy(CallConv::kStrategyWin64);
+ cc.setFlags(CallConv::kFlagPassFloatsByVec | CallConv::kFlagIndirectVecArgs);
+ cc.setNaturalStackAlignment(16);
+ cc.setSpillZoneSize(32);
+ cc.setPassedOrder(kGroupGp, kZcx, kZdx, 8, 9);
+ cc.setPassedOrder(kGroupVec, 0, 1, 2, 3);
+ cc.setPreservedRegs(kGroupGp, Support::bitMask(kZbx, kZsp, kZbp, kZsi, kZdi, 12, 13, 14, 15));
+ cc.setPreservedRegs(kGroupVec, Support::bitMask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
+ break;
+
+ case CallConv::kIdX86SysV64:
+ cc.setArchType(ArchInfo::kIdX64);
+ cc.setFlags(CallConv::kFlagPassFloatsByVec);
+ cc.setNaturalStackAlignment(16);
+ cc.setRedZoneSize(128);
+ cc.setPassedOrder(kGroupGp, kZdi, kZsi, kZdx, kZcx, 8, 9);
+ cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
+ cc.setPreservedRegs(kGroupGp, Support::bitMask(kZbx, kZsp, kZbp, 12, 13, 14, 15));
+ break;
+
+ case CallConv::kIdX86LightCall2:
+ case CallConv::kIdX86LightCall3:
+ case CallConv::kIdX86LightCall4: {
+ uint32_t n = (ccId - CallConv::kIdX86LightCall2) + 2;
+
+ cc.setArchType(ArchInfo::kIdX86);
+ cc.setFlags(CallConv::kFlagPassFloatsByVec);
+ cc.setNaturalStackAlignment(16);
+ cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx, kZsi, kZdi);
+ cc.setPassedOrder(kGroupMm, 0, 1, 2, 3, 4, 5, 6, 7);
+ cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
+ cc.setPassedOrder(kGroupKReg, 0, 1, 2, 3, 4, 5, 6, 7);
+
+ cc.setPreservedRegs(kGroupGp , Support::lsbMask<uint32_t>(8));
+ cc.setPreservedRegs(kGroupVec , Support::lsbMask<uint32_t>(8) & ~Support::lsbMask<uint32_t>(n));
+ break;
+ }
+
+ case CallConv::kIdX64LightCall2:
+ case CallConv::kIdX64LightCall3:
+ case CallConv::kIdX64LightCall4: {
+ uint32_t n = (ccId - CallConv::kIdX64LightCall2) + 2;
+
+ cc.setArchType(ArchInfo::kIdX64);
+ cc.setFlags(CallConv::kFlagPassFloatsByVec);
+ cc.setNaturalStackAlignment(16);
+ cc.setPassedOrder(kGroupGp, kZax, kZdx, kZcx, kZsi, kZdi);
+ cc.setPassedOrder(kGroupMm, 0, 1, 2, 3, 4, 5, 6, 7);
+ cc.setPassedOrder(kGroupVec, 0, 1, 2, 3, 4, 5, 6, 7);
+ cc.setPassedOrder(kGroupKReg, 0, 1, 2, 3, 4, 5, 6, 7);
+
+ cc.setPreservedRegs(kGroupGp , Support::lsbMask<uint32_t>(16));
+ cc.setPreservedRegs(kGroupVec ,~Support::lsbMask<uint32_t>(n));
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidArgument);
+ }
+
+ cc.setId(ccId);
+ return kErrorOk;
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86callconv_p.h b/3rdparty/asmjit/src/asmjit/x86/x86callconv_p.h
new file mode 100644
index 00000000000..5fc21133cef
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86callconv_p.h
@@ -0,0 +1,50 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86CALLCONV_P_H_INCLUDED
+#define ASMJIT_X86_X86CALLCONV_P_H_INCLUDED
+
+#include "../core/callconv.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::CallConvInternal]
+// ============================================================================
+
+//! X86-specific function API (calling conventions and other utilities).
+namespace CallConvInternal {
+ //! Initialize `CallConv` structure (X86 specific).
+ Error init(CallConv& cc, uint32_t ccId) noexcept;
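+
+  // Illustrative use (a sketch; the `ccId` constants come from `CallConv`
+  // in the core headers):
+  //   CallConv cc;
+  //   cc.reset();
+  //   Error err = CallConvInternal::init(cc, CallConv::kIdX86SysV64);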
+}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86CALLCONV_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86compiler.cpp b/3rdparty/asmjit/src/asmjit/x86/x86compiler.cpp
new file mode 100644
index 00000000000..910ed5e7dde
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86compiler.cpp
@@ -0,0 +1,76 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_NO_COMPILER)
+
+#include "../x86/x86assembler.h"
+#include "../x86/x86compiler.h"
+#include "../x86/x86rapass_p.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::Compiler - Construction / Destruction]
+// ============================================================================
+
+Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
+ if (code)
+ code->attach(this);
+}
+Compiler::~Compiler() noexcept {}
+
+// ============================================================================
+// [asmjit::x86::Compiler - Finalize]
+// ============================================================================
+
+Error Compiler::finalize() {
+ ASMJIT_PROPAGATE(runPasses());
+ Assembler a(_code);
+ return serialize(&a);
+}
+
+// ============================================================================
+// [asmjit::x86::Compiler - Events]
+// ============================================================================
+
+Error Compiler::onAttach(CodeHolder* code) noexcept {
+ uint32_t archId = code->archId();
+ if (!ArchInfo::isX86Family(archId))
+ return DebugUtils::errored(kErrorInvalidArch);
+
+ ASMJIT_PROPAGATE(Base::onAttach(code));
+ _gpRegInfo.setSignature(archId == ArchInfo::kIdX86 ? uint32_t(Gpd::kSignature) : uint32_t(Gpq::kSignature));
+
+ Error err = addPassT<X86RAPass>();
+ if (ASMJIT_UNLIKELY(err)) {
+ onDetach(code);
+ return err;
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86 && !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86compiler.h b/3rdparty/asmjit/src/asmjit/x86/x86compiler.h
new file mode 100644
index 00000000000..cc7035b2990
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86compiler.h
@@ -0,0 +1,288 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86COMPILER_H_INCLUDED
+#define ASMJIT_X86_X86COMPILER_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/compiler.h"
+#include "../core/datatypes.h"
+#include "../core/type.h"
+#include "../x86/x86emitter.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::Compiler]
+// ============================================================================
+
+//! Architecture-dependent asm-compiler (X86).
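+//!
+//! Builds on the node-based builder and adds virtual registers and function
+//! management. A sketch (illustrative only; `FuncSignatureT` comes from the
+//! core `func.h`):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! CodeHolder code;
+//! code.init(CodeInfo(ArchInfo::kIdHost));
+//!
+//! x86::Compiler cc(&code);
+//! cc.addFunc(FuncSignatureT<int>()); // Begin `int fn(void)`.
+//!
+//! x86::Gp v = cc.newGpd("v");        // Virtual 32-bit register.
+//! cc.mov(v, 42);
+//! cc.ret(v);
+//!
+//! cc.endFunc();                      // Close the function body.
+//! cc.finalize();                     // Allocates registers and serializes.
+//! ```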
+class ASMJIT_VIRTAPI Compiler
+ : public BaseCompiler,
+ public EmitterExplicitT<Compiler> {
+public:
+ ASMJIT_NONCOPYABLE(Compiler)
+ typedef BaseCompiler Base;
+
+ //! \name Construction & Destruction
+ //! \{
+
+ ASMJIT_API explicit Compiler(CodeHolder* code = nullptr) noexcept;
+ ASMJIT_API virtual ~Compiler() noexcept;
+
+ //! \}
+
+ //! \name Virtual Registers
+ //! \{
+
+#ifndef ASMJIT_NO_LOGGING
+# define ASMJIT_NEW_REG_FMT(OUT, PARAM, FORMAT, ARGS) \
+ _newRegFmt(OUT, PARAM, FORMAT, ARGS)
+#else
+# define ASMJIT_NEW_REG_FMT(OUT, PARAM, FORMAT, ARGS) \
+ DebugUtils::unused(FORMAT); \
+ DebugUtils::unused(std::forward<Args>(args)...); \
+ _newReg(OUT, PARAM)
+#endif
+
+#define ASMJIT_NEW_REG_CUSTOM(FUNC, REG) \
+ inline REG FUNC(uint32_t typeId) { \
+ REG reg(Globals::NoInit); \
+ _newReg(reg, typeId); \
+ return reg; \
+ } \
+ \
+ template<typename... Args> \
+ inline REG FUNC(uint32_t typeId, const char* fmt, Args&&... args) { \
+ REG reg(Globals::NoInit); \
+ ASMJIT_NEW_REG_FMT(reg, typeId, fmt, std::forward<Args>(args)...); \
+ return reg; \
+ }
+
+#define ASMJIT_NEW_REG_TYPED(FUNC, REG, TYPE_ID) \
+ inline REG FUNC() { \
+ REG reg(Globals::NoInit); \
+ _newReg(reg, TYPE_ID); \
+ return reg; \
+ } \
+ \
+ template<typename... Args> \
+ inline REG FUNC(const char* fmt, Args&&... args) { \
+ REG reg(Globals::NoInit); \
+ ASMJIT_NEW_REG_FMT(reg, TYPE_ID, fmt, std::forward<Args>(args)...); \
+ return reg; \
+ }
+
+ template<typename RegT>
+ inline RegT newSimilarReg(const RegT& ref) {
+ RegT reg(Globals::NoInit);
+ _newReg(reg, ref);
+ return reg;
+ }
+
+ template<typename RegT, typename... Args>
+ inline RegT newSimilarReg(const RegT& ref, const char* fmt, Args&&... args) {
+ RegT reg(Globals::NoInit);
+ ASMJIT_NEW_REG_FMT(reg, ref, fmt, std::forward<Args>(args)...);
+ return reg;
+ }
+
+ ASMJIT_NEW_REG_CUSTOM(newReg , Reg )
+ ASMJIT_NEW_REG_CUSTOM(newGp , Gp )
+ ASMJIT_NEW_REG_CUSTOM(newVec , Vec )
+ ASMJIT_NEW_REG_CUSTOM(newK , KReg)
+
+ ASMJIT_NEW_REG_TYPED(newI8 , Gp , Type::kIdI8 )
+ ASMJIT_NEW_REG_TYPED(newU8 , Gp , Type::kIdU8 )
+ ASMJIT_NEW_REG_TYPED(newI16 , Gp , Type::kIdI16 )
+ ASMJIT_NEW_REG_TYPED(newU16 , Gp , Type::kIdU16 )
+ ASMJIT_NEW_REG_TYPED(newI32 , Gp , Type::kIdI32 )
+ ASMJIT_NEW_REG_TYPED(newU32 , Gp , Type::kIdU32 )
+ ASMJIT_NEW_REG_TYPED(newI64 , Gp , Type::kIdI64 )
+ ASMJIT_NEW_REG_TYPED(newU64 , Gp , Type::kIdU64 )
+ ASMJIT_NEW_REG_TYPED(newInt8 , Gp , Type::kIdI8 )
+ ASMJIT_NEW_REG_TYPED(newUInt8 , Gp , Type::kIdU8 )
+ ASMJIT_NEW_REG_TYPED(newInt16 , Gp , Type::kIdI16 )
+ ASMJIT_NEW_REG_TYPED(newUInt16 , Gp , Type::kIdU16 )
+ ASMJIT_NEW_REG_TYPED(newInt32 , Gp , Type::kIdI32 )
+ ASMJIT_NEW_REG_TYPED(newUInt32 , Gp , Type::kIdU32 )
+ ASMJIT_NEW_REG_TYPED(newInt64 , Gp , Type::kIdI64 )
+ ASMJIT_NEW_REG_TYPED(newUInt64 , Gp , Type::kIdU64 )
+ ASMJIT_NEW_REG_TYPED(newIntPtr , Gp , Type::kIdIntPtr )
+ ASMJIT_NEW_REG_TYPED(newUIntPtr, Gp , Type::kIdUIntPtr)
+
+ ASMJIT_NEW_REG_TYPED(newGpb , Gp , Type::kIdU8 )
+ ASMJIT_NEW_REG_TYPED(newGpw , Gp , Type::kIdU16 )
+ ASMJIT_NEW_REG_TYPED(newGpd , Gp , Type::kIdU32 )
+ ASMJIT_NEW_REG_TYPED(newGpq , Gp , Type::kIdU64 )
+ ASMJIT_NEW_REG_TYPED(newGpz , Gp , Type::kIdUIntPtr)
+ ASMJIT_NEW_REG_TYPED(newXmm , Xmm , Type::kIdI32x4 )
+ ASMJIT_NEW_REG_TYPED(newXmmSs , Xmm , Type::kIdF32x1 )
+ ASMJIT_NEW_REG_TYPED(newXmmSd , Xmm , Type::kIdF64x1 )
+ ASMJIT_NEW_REG_TYPED(newXmmPs , Xmm , Type::kIdF32x4 )
+ ASMJIT_NEW_REG_TYPED(newXmmPd , Xmm , Type::kIdF64x2 )
+ ASMJIT_NEW_REG_TYPED(newYmm , Ymm , Type::kIdI32x8 )
+ ASMJIT_NEW_REG_TYPED(newYmmPs , Ymm , Type::kIdF32x8 )
+ ASMJIT_NEW_REG_TYPED(newYmmPd , Ymm , Type::kIdF64x4 )
+ ASMJIT_NEW_REG_TYPED(newZmm , Zmm , Type::kIdI32x16 )
+ ASMJIT_NEW_REG_TYPED(newZmmPs , Zmm , Type::kIdF32x16 )
+ ASMJIT_NEW_REG_TYPED(newZmmPd , Zmm , Type::kIdF64x8 )
+ ASMJIT_NEW_REG_TYPED(newMm , Mm , Type::kIdMmx64 )
+ ASMJIT_NEW_REG_TYPED(newKb , KReg, Type::kIdMask8 )
+ ASMJIT_NEW_REG_TYPED(newKw , KReg, Type::kIdMask16 )
+ ASMJIT_NEW_REG_TYPED(newKd , KReg, Type::kIdMask32 )
+ ASMJIT_NEW_REG_TYPED(newKq , KReg, Type::kIdMask64 )
+
+#undef ASMJIT_NEW_REG_TYPED
+#undef ASMJIT_NEW_REG_CUSTOM
+#undef ASMJIT_NEW_REG_FMT
+
+ //! \}
+
+ //! \name Stack
+ //! \{
+
+ //! Creates a new memory chunk allocated on the current function's stack.
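+  //!
+  //! For example, `Mem slot = cc.newStack(32, 16, "tmp");` (a sketch) reserves
+  //! 32 bytes with 16-byte alignment and returns an operand addressing them.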
+ inline Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
+ Mem m(Globals::NoInit);
+ _newStack(m, size, alignment, name);
+ return m;
+ }
+
+ //! \}
+
+ //! \name Constants
+ //! \{
+
+ //! Put data to a constant-pool and get a memory reference to it.
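+  //!
+  //! For example (a sketch; `ConstPool::kScopeLocal` is defined in the core
+  //! `constpool.h`):
+  //!
+  //! ```
+  //! static const uint32_t fourInts[4] = { 0, 1, 2, 3 };
+  //! Mem c = cc.newConst(ConstPool::kScopeLocal, fourInts, 16);
+  //! ```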
+ inline Mem newConst(uint32_t scope, const void* data, size_t size) {
+ Mem m(Globals::NoInit);
+ _newConst(m, scope, data, size);
+ return m;
+ }
+
+ //! Put a BYTE `val` to a constant-pool.
+ inline Mem newByteConst(uint32_t scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
+ //! Put a WORD `val` to a constant-pool.
+ inline Mem newWordConst(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
+ //! Put a DWORD `val` to a constant-pool.
+ inline Mem newDWordConst(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
+ //! Put a QWORD `val` to a constant-pool.
+ inline Mem newQWordConst(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
+
+ //! Put a WORD `val` to a constant-pool.
+ inline Mem newInt16Const(uint32_t scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
+ //! Put a WORD `val` to a constant-pool.
+ inline Mem newUInt16Const(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
+ //! Put a DWORD `val` to a constant-pool.
+ inline Mem newInt32Const(uint32_t scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
+ //! Put a DWORD `val` to a constant-pool.
+ inline Mem newUInt32Const(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
+ //! Put a QWORD `val` to a constant-pool.
+ inline Mem newInt64Const(uint32_t scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
+ //! Put a QWORD `val` to a constant-pool.
+ inline Mem newUInt64Const(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
+
+ //! Put a SP-FP `val` to a constant-pool.
+ inline Mem newFloatConst(uint32_t scope, float val) noexcept { return newConst(scope, &val, 4); }
+ //! Put a DP-FP `val` to a constant-pool.
+ inline Mem newDoubleConst(uint32_t scope, double val) noexcept { return newConst(scope, &val, 8); }
+
+ //! Put a MMX `val` to a constant-pool.
+ inline Mem newMmConst(uint32_t scope, const Data64& val) noexcept { return newConst(scope, &val, 8); }
+ //! Put a XMM `val` to a constant-pool.
+ inline Mem newXmmConst(uint32_t scope, const Data128& val) noexcept { return newConst(scope, &val, 16); }
+ //! Put a YMM `val` to a constant-pool.
+ inline Mem newYmmConst(uint32_t scope, const Data256& val) noexcept { return newConst(scope, &val, 32); }
+
+ //! \}
+
+ //! \name Instruction Options
+ //! \{
+
+  //! Force the compiler not to follow the conditional or unconditional jump.
+ inline Compiler& unfollow() noexcept { _instOptions |= Inst::kOptionUnfollow; return *this; }
+ //! Tell the compiler that the destination variable will be overwritten.
+ inline Compiler& overwrite() noexcept { _instOptions |= Inst::kOptionOverwrite; return *this; }
+
+ //! \}
+
+ //! \name Function Call & Ret Intrinsics
+ //! \{
+
+ //! Call a function.
+ inline FuncCallNode* call(const Gp& target, const FuncSignature& sign) { return addCall(Inst::kIdCall, target, sign); }
+ //! \overload
+ inline FuncCallNode* call(const Mem& target, const FuncSignature& sign) { return addCall(Inst::kIdCall, target, sign); }
+ //! \overload
+ inline FuncCallNode* call(const Label& target, const FuncSignature& sign) { return addCall(Inst::kIdCall, target, sign); }
+ //! \overload
+ inline FuncCallNode* call(const Imm& target, const FuncSignature& sign) { return addCall(Inst::kIdCall, target, sign); }
+ //! \overload
+ inline FuncCallNode* call(uint64_t target, const FuncSignature& sign) { return addCall(Inst::kIdCall, Imm(int64_t(target)), sign); }
+
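+  // A sketch of calling an external function through the compiler
+  // (`FuncSignatureT` and `CallConv::kIdHost` come from the core headers;
+  // `strPtr` and `retVal` are hypothetical virtual registers):
+  //
+  //   FuncCallNode* n = cc.call(
+  //     uint64_t(uintptr_t(&puts)),
+  //     FuncSignatureT<int, const char*>(CallConv::kIdHost));
+  //   n->setArg(0, strPtr);          // Bind the first argument.
+  //   n->setRet(0, retVal);          // Bind the return value.
+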
+ //! Return.
+ inline FuncRetNode* ret() { return addRet(Operand(), Operand()); }
+ //! \overload
+ inline FuncRetNode* ret(const BaseReg& o0) { return addRet(o0, Operand()); }
+ //! \overload
+ inline FuncRetNode* ret(const BaseReg& o0, const BaseReg& o1) { return addRet(o0, o1); }
+
+ //! \}
+
+ //! \name Jump Tables Support
+ //! \{
+
+ using EmitterExplicitT<Compiler>::jmp;
+
+ inline Error jmp(const BaseReg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdJmp, target, annotation); }
+ inline Error jmp(const BaseMem& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdJmp, target, annotation); }
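+
+  // A sketch of annotating an indirect jump for jump tables (assuming
+  // `newJumpAnnotation()` from `BaseCompiler`; `caseA`/`caseB` are labels
+  // bound to the jump targets):
+  //
+  //   JumpAnnotation* ann = cc.newJumpAnnotation();
+  //   ann->addLabel(caseA);
+  //   ann->addLabel(caseB);
+  //   cc.jmp(idx, ann);              // The allocator now knows all targets.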
+
+ //! \}
+
+ //! \name Finalize
+ //! \{
+
+ ASMJIT_API Error finalize() override;
+
+ //! \}
+
+ //! \name Events
+ //! \{
+
+ ASMJIT_API Error onAttach(CodeHolder* code) noexcept override;
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_X86_X86COMPILER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86emitter.h b/3rdparty/asmjit/src/asmjit/x86/x86emitter.h
new file mode 100644
index 00000000000..cfd96a18809
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86emitter.h
@@ -0,0 +1,5566 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86EMITTER_H_INCLUDED
+#define ASMJIT_X86_X86EMITTER_H_INCLUDED
+
+#include "../core/emitter.h"
+#include "../core/support.h"
+#include "../x86/x86globals.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+#define ASMJIT_INST_0x(NAME, ID) \
+ inline Error NAME() { return _emitter()->emit(Inst::kId##ID); }
+
+#define ASMJIT_INST_1x(NAME, ID, T0) \
+ inline Error NAME(const T0& o0) { return _emitter()->emit(Inst::kId##ID, o0); }
+
+#define ASMJIT_INST_1i(NAME, ID, T0) \
+ inline Error NAME(const T0& o0) { return _emitter()->emit(Inst::kId##ID, o0); } \
+ inline Error NAME(int o0) { return _emitter()->emit(Inst::kId##ID, Support::asInt(o0)); } \
+ inline Error NAME(unsigned int o0) { return _emitter()->emit(Inst::kId##ID, Support::asInt(o0)); } \
+ inline Error NAME(int64_t o0) { return _emitter()->emit(Inst::kId##ID, Support::asInt(o0)); } \
+ inline Error NAME(uint64_t o0) { return _emitter()->emit(Inst::kId##ID, Support::asInt(o0)); }
+
+#define ASMJIT_INST_1c(NAME, ID, CONV, T0) \
+ inline Error NAME(uint32_t cc, const T0& o0) { return _emitter()->emit(CONV(cc), o0); } \
+ inline Error NAME##a(const T0& o0) { return _emitter()->emit(Inst::kId##ID##a, o0); } \
+ inline Error NAME##ae(const T0& o0) { return _emitter()->emit(Inst::kId##ID##ae, o0); } \
+ inline Error NAME##b(const T0& o0) { return _emitter()->emit(Inst::kId##ID##b, o0); } \
+ inline Error NAME##be(const T0& o0) { return _emitter()->emit(Inst::kId##ID##be, o0); } \
+ inline Error NAME##c(const T0& o0) { return _emitter()->emit(Inst::kId##ID##c, o0); } \
+ inline Error NAME##e(const T0& o0) { return _emitter()->emit(Inst::kId##ID##e, o0); } \
+ inline Error NAME##g(const T0& o0) { return _emitter()->emit(Inst::kId##ID##g, o0); } \
+ inline Error NAME##ge(const T0& o0) { return _emitter()->emit(Inst::kId##ID##ge, o0); } \
+ inline Error NAME##l(const T0& o0) { return _emitter()->emit(Inst::kId##ID##l, o0); } \
+ inline Error NAME##le(const T0& o0) { return _emitter()->emit(Inst::kId##ID##le, o0); } \
+ inline Error NAME##na(const T0& o0) { return _emitter()->emit(Inst::kId##ID##na, o0); } \
+ inline Error NAME##nae(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nae, o0); } \
+ inline Error NAME##nb(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nb, o0); } \
+ inline Error NAME##nbe(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nbe, o0); } \
+ inline Error NAME##nc(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nc, o0); } \
+ inline Error NAME##ne(const T0& o0) { return _emitter()->emit(Inst::kId##ID##ne, o0); } \
+ inline Error NAME##ng(const T0& o0) { return _emitter()->emit(Inst::kId##ID##ng, o0); } \
+ inline Error NAME##nge(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nge, o0); } \
+ inline Error NAME##nl(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nl, o0); } \
+ inline Error NAME##nle(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nle, o0); } \
+ inline Error NAME##no(const T0& o0) { return _emitter()->emit(Inst::kId##ID##no, o0); } \
+ inline Error NAME##np(const T0& o0) { return _emitter()->emit(Inst::kId##ID##np, o0); } \
+ inline Error NAME##ns(const T0& o0) { return _emitter()->emit(Inst::kId##ID##ns, o0); } \
+ inline Error NAME##nz(const T0& o0) { return _emitter()->emit(Inst::kId##ID##nz, o0); } \
+ inline Error NAME##o(const T0& o0) { return _emitter()->emit(Inst::kId##ID##o, o0); } \
+ inline Error NAME##p(const T0& o0) { return _emitter()->emit(Inst::kId##ID##p, o0); } \
+ inline Error NAME##pe(const T0& o0) { return _emitter()->emit(Inst::kId##ID##pe, o0); } \
+ inline Error NAME##po(const T0& o0) { return _emitter()->emit(Inst::kId##ID##po, o0); } \
+ inline Error NAME##s(const T0& o0) { return _emitter()->emit(Inst::kId##ID##s, o0); } \
+ inline Error NAME##z(const T0& o0) { return _emitter()->emit(Inst::kId##ID##z, o0); }
+
+#define ASMJIT_INST_2x(NAME, ID, T0, T1) \
+ inline Error NAME(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID, o0, o1); }
+
+#define ASMJIT_INST_2i(NAME, ID, T0, T1) \
+ inline Error NAME(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID, o0, o1); } \
+ inline Error NAME(const T0& o0, int o1) { return _emitter()->emit(Inst::kId##ID, o0, Support::asInt(o1)); } \
+ inline Error NAME(const T0& o0, unsigned int o1) { return _emitter()->emit(Inst::kId##ID, o0, Support::asInt(o1)); } \
+ inline Error NAME(const T0& o0, int64_t o1) { return _emitter()->emit(Inst::kId##ID, o0, Support::asInt(o1)); } \
+ inline Error NAME(const T0& o0, uint64_t o1) { return _emitter()->emit(Inst::kId##ID, o0, Support::asInt(o1)); }
+
+#define ASMJIT_INST_2c(NAME, ID, CONV, T0, T1) \
+ inline Error NAME(uint32_t cc, const T0& o0, const T1& o1) { return _emitter()->emit(CONV(cc), o0, o1); } \
+ inline Error NAME##a(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##a, o0, o1); } \
+ inline Error NAME##ae(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##ae, o0, o1); } \
+ inline Error NAME##b(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##b, o0, o1); } \
+ inline Error NAME##be(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##be, o0, o1); } \
+ inline Error NAME##c(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##c, o0, o1); } \
+ inline Error NAME##e(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##e, o0, o1); } \
+ inline Error NAME##g(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##g, o0, o1); } \
+ inline Error NAME##ge(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##ge, o0, o1); } \
+ inline Error NAME##l(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##l, o0, o1); } \
+ inline Error NAME##le(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##le, o0, o1); } \
+ inline Error NAME##na(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##na, o0, o1); } \
+ inline Error NAME##nae(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nae, o0, o1); } \
+ inline Error NAME##nb(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nb, o0, o1); } \
+ inline Error NAME##nbe(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nbe, o0, o1); } \
+ inline Error NAME##nc(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nc, o0, o1); } \
+ inline Error NAME##ne(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##ne, o0, o1); } \
+ inline Error NAME##ng(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##ng, o0, o1); } \
+ inline Error NAME##nge(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nge, o0, o1); } \
+ inline Error NAME##nl(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nl, o0, o1); } \
+ inline Error NAME##nle(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nle, o0, o1); } \
+ inline Error NAME##no(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##no, o0, o1); } \
+ inline Error NAME##np(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##np, o0, o1); } \
+ inline Error NAME##ns(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##ns, o0, o1); } \
+ inline Error NAME##nz(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##nz, o0, o1); } \
+ inline Error NAME##o(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##o, o0, o1); } \
+ inline Error NAME##p(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##p, o0, o1); } \
+ inline Error NAME##pe(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##pe, o0, o1); } \
+ inline Error NAME##po(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##po, o0, o1); } \
+ inline Error NAME##s(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##s, o0, o1); } \
+ inline Error NAME##z(const T0& o0, const T1& o1) { return _emitter()->emit(Inst::kId##ID##z, o0, o1); }
+
+#define ASMJIT_INST_3x(NAME, ID, T0, T1, T2) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2); }
+
+#define ASMJIT_INST_3i(NAME, ID, T0, T1, T2) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2); } \
+ inline Error NAME(const T0& o0, const T1& o1, int o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, Support::asInt(o2)); } \
+ inline Error NAME(const T0& o0, const T1& o1, unsigned int o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, Support::asInt(o2)); } \
+ inline Error NAME(const T0& o0, const T1& o1, int64_t o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, Support::asInt(o2)); } \
+ inline Error NAME(const T0& o0, const T1& o1, uint64_t o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, Support::asInt(o2)); }
+
+#define ASMJIT_INST_3ii(NAME, ID, T0, T1, T2) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2); } \
+ inline Error NAME(const T0& o0, int o1, int o2) { return _emitter()->emit(Inst::kId##ID, o0, Imm(o1), Support::asInt(o2)); }
+
+#define ASMJIT_INST_4x(NAME, ID, T0, T1, T2, T3) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3); }
+
+#define ASMJIT_INST_4i(NAME, ID, T0, T1, T2, T3) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, int o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, Support::asInt(o3)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, unsigned int o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, Support::asInt(o3)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, int64_t o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, Support::asInt(o3)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, uint64_t o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, Support::asInt(o3)); }
+
+#define ASMJIT_INST_4ii(NAME, ID, T0, T1, T2, T3) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3); } \
+ inline Error NAME(const T0& o0, const T1& o1, int o2, int o3) { return _emitter()->emit(Inst::kId##ID, o0, o1, Imm(o2), Support::asInt(o3)); }
+
+#define ASMJIT_INST_5x(NAME, ID, T0, T1, T2, T3, T4) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, const T4& o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, o4); }
+
+#define ASMJIT_INST_5i(NAME, ID, T0, T1, T2, T3, T4) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, const T4& o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, o4); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, int o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, Support::asInt(o4)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, unsigned int o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, Support::asInt(o4)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, int64_t o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, Support::asInt(o4)); } \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, uint64_t o4) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, Support::asInt(o4)); }
+
+#define ASMJIT_INST_6x(NAME, ID, T0, T1, T2, T3, T4, T5) \
+ inline Error NAME(const T0& o0, const T1& o1, const T2& o2, const T3& o3, const T4& o4, const T5& o5) { return _emitter()->emit(Inst::kId##ID, o0, o1, o2, o3, o4, o5); }
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::EmitterExplicitT]
+// ============================================================================
+
+template<typename This>
+struct EmitterExplicitT {
+ //! \cond
+ // These typedefs are used to describe implicit operands passed explicitly.
+ typedef Gp AL;
+ typedef Gp AH;
+ typedef Gp CL;
+ typedef Gp AX;
+ typedef Gp DX;
+
+ typedef Gp EAX;
+ typedef Gp EBX;
+ typedef Gp ECX;
+ typedef Gp EDX;
+
+ typedef Gp RAX;
+ typedef Gp RBX;
+ typedef Gp RCX;
+ typedef Gp RDX;
+
+ typedef Gp ZAX;
+ typedef Gp ZBX;
+ typedef Gp ZCX;
+ typedef Gp ZDX;
+
+ typedef Mem DS_ZAX; // ds:[zax]
+ typedef Mem DS_ZDI; // ds:[zdi]
+ typedef Mem ES_ZDI; // es:[zdi]
+ typedef Mem DS_ZSI; // ds:[zsi]
+
+ typedef Xmm XMM0;
+
+ // These two are unfortunately reported by the sanitizer. We know what we do,
+ // however, the sanitizer doesn't. I have tried to use reinterpret_cast instead,
+ // but that would generate bad code when compiled by MSC.
+ ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF inline This* _emitter() noexcept { return static_cast<This*>(this); }
+ ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF inline const This* _emitter() const noexcept { return static_cast<const This*>(this); }
+
+ //! \endcond
+
+ //! \name Native Registers
+ //! \{
+
+ //! Returns either GPD or GPQ register of the given `id` depending on the emitter's architecture.
+ inline Gp gpz(uint32_t id) const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), id); }
+
+ inline Gp zax() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdAx); }
+ inline Gp zcx() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdCx); }
+ inline Gp zdx() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdDx); }
+ inline Gp zbx() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdBx); }
+ inline Gp zsp() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdSp); }
+ inline Gp zbp() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdBp); }
+ inline Gp zsi() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdSi); }
+ inline Gp zdi() const noexcept { return Gp(_emitter()->_gpRegInfo.signature(), Gp::kIdDi); }
+
+ //! \}
+
+ //! \name Native Pointers
+ //! \{
+
+  //! Creates a target-dependent pointer whose base register id is `baseId`.
+ inline Mem ptr_base(uint32_t baseId, int32_t off = 0, uint32_t size = 0) const noexcept {
+ return Mem(Mem::Decomposed { _emitter()->_gpRegInfo.type(), baseId, 0, 0, off, size, 0 });
+ }
+
+ inline Mem ptr_zax(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdAx, off, size); }
+ inline Mem ptr_zcx(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdCx, off, size); }
+ inline Mem ptr_zdx(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdDx, off, size); }
+ inline Mem ptr_zbx(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdBx, off, size); }
+ inline Mem ptr_zsp(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdSp, off, size); }
+ inline Mem ptr_zbp(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdBp, off, size); }
+ inline Mem ptr_zsi(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdSi, off, size); }
+ inline Mem ptr_zdi(int32_t off = 0, uint32_t size = 0) const noexcept { return ptr_base(Gp::kIdDi, off, size); }
+
+  //! Creates an `intptr_t`-sized memory operand for the current architecture.
+ inline Mem intptr_ptr(const Gp& base, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Label& base, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Label& base, const Gp& index, uint32_t shift, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Label& base, const Vec& index, uint32_t shift, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(const Rip& rip, int32_t offset = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(rip, offset, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(uint64_t base) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr(uint64_t base, const Gp& index, uint32_t shift = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, nativeGpSize);
+ }
+ //! \overload
+ inline Mem intptr_ptr_abs(uint64_t base) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, nativeGpSize, BaseMem::kSignatureMemAbs);
+ }
+ //! \overload
+ inline Mem intptr_ptr_abs(uint64_t base, const Gp& index, uint32_t shift = 0) const noexcept {
+ uint32_t nativeGpSize = _emitter()->gpSize();
+ return Mem(base, index, shift, nativeGpSize, BaseMem::kSignatureMemAbs);
+ }
+
+ //! \}
+
+ //! \name Embed
+ //! \{
+
+ //! Adds 8-bit integer data to the CodeBuffer.
+ inline Error db(uint8_t x) { return _emitter()->embed(&x, 1); }
+ //! Adds 16-bit integer data to the CodeBuffer.
+ inline Error dw(uint16_t x) { return _emitter()->embed(&x, 2); }
+ //! Adds 32-bit integer data to the CodeBuffer.
+ inline Error dd(uint32_t x) { return _emitter()->embed(&x, 4); }
+ //! Adds 64-bit integer data to the CodeBuffer.
+ inline Error dq(uint64_t x) { return _emitter()->embed(&x, 8); }
+
+ //! Adds 8-bit integer data to the CodeBuffer.
+ inline Error dint8(int8_t x) { return _emitter()->embed(&x, sizeof(int8_t)); }
+ //! Adds 8-bit integer data to the CodeBuffer.
+ inline Error duint8(uint8_t x) { return _emitter()->embed(&x, sizeof(uint8_t)); }
+
+ //! Adds 16-bit integer data to the CodeBuffer.
+ inline Error dint16(int16_t x) { return _emitter()->embed(&x, sizeof(int16_t)); }
+ //! Adds 16-bit integer data to the CodeBuffer.
+ inline Error duint16(uint16_t x) { return _emitter()->embed(&x, sizeof(uint16_t)); }
+
+ //! Adds 32-bit integer data to the CodeBuffer.
+ inline Error dint32(int32_t x) { return _emitter()->embed(&x, sizeof(int32_t)); }
+ //! Adds 32-bit integer data to the CodeBuffer.
+ inline Error duint32(uint32_t x) { return _emitter()->embed(&x, sizeof(uint32_t)); }
+
+ //! Adds 64-bit integer data to the CodeBuffer.
+ inline Error dint64(int64_t x) { return _emitter()->embed(&x, sizeof(int64_t)); }
+ //! Adds 64-bit integer data to the CodeBuffer.
+ inline Error duint64(uint64_t x) { return _emitter()->embed(&x, sizeof(uint64_t)); }
+
+ //! Adds float data to the CodeBuffer.
+ inline Error dfloat(float x) { return _emitter()->embed(&x, sizeof(float)); }
+ //! Adds double data to the CodeBuffer.
+ inline Error ddouble(double x) { return _emitter()->embed(&x, sizeof(double)); }
+
+ //! Adds MMX data to the CodeBuffer.
+ inline Error dmm(const Data64& x) { return _emitter()->embed(&x, sizeof(Data64)); }
+ //! Adds XMM data to the CodeBuffer.
+ inline Error dxmm(const Data128& x) { return _emitter()->embed(&x, sizeof(Data128)); }
+ //! Adds YMM data to the CodeBuffer.
+ inline Error dymm(const Data256& x) { return _emitter()->embed(&x, sizeof(Data256)); }
+
+ //! Adds data in a given structure instance to the CodeBuffer.
+ template<typename T>
+ inline Error dstruct(const T& x) { return _emitter()->embed(&x, uint32_t(sizeof(T))); }
+
+ //! \}
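+
+ // A minimal sketch of embedding data (assumes an `x86::Assembler a` attached
+ // to a CodeHolder; `dataLabel` is a hypothetical label created by
+ // `a.newLabel()`):
+ //
+ //   a.ret();                // Code first...
+ //   a.bind(dataLabel);      // ...data after it.
+ //   a.dd(0xDEADBEEFu);      // Embeds a 32-bit integer constant.
+ //   a.dfloat(1.5f);         // Embeds a 32-bit float constant.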
+
+protected:
+ //! \cond
+ inline This& _addInstOptions(uint32_t options) noexcept {
+ _emitter()->addInstOptions(options);
+ return *_emitter();
+ }
+ //! \endcond
+
+public:
+ //! \name Short/Long Form Options
+ //! \{
+
+ //! Force short form of jmp/jcc instruction.
+ inline This& short_() noexcept { return _addInstOptions(Inst::kOptionShortForm); }
+ //! Force long form of jmp/jcc instruction.
+ inline This& long_() noexcept { return _addInstOptions(Inst::kOptionLongForm); }
+
+ //! \}
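+
+ // A minimal sketch (assumes an `x86::Assembler a` and a label `L_Target`;
+ // both names are illustrative):
+ //
+ //   a.short_().jmp(L_Target); // Forces the rel8 form (fails if out of range).
+ //   a.long_().jmp(L_Target);  // Forces the rel32 form.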
+
+ //! \name Encoding Options
+ //! \{
+
+ //! Prefer MOD_MR encoding over MOD_RM (the default) when encoding an
+ //! instruction that allows both. This option is only applicable to
+ //! instructions where both operands are registers.
+ inline This& mod_mr() noexcept { return _addInstOptions(Inst::kOptionModMR); }
+
+ //! \}
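+
+ // A minimal sketch: `add eax, ebx` has two register-register encodings and
+ // `mod_mr()` selects the alternative one (assumes an `x86::Assembler a`):
+ //
+ //   a.add(x86::eax, x86::ebx);          // MOD_RM (default): 03 C3
+ //   a.mod_mr().add(x86::eax, x86::ebx); // MOD_MR:           01 D8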
+
+ //! \name Prefix Options
+ //! \{
+
+ //! Condition is likely to be taken (a branch hint that only benefits the Pentium 4).
+ inline This& taken() noexcept { return _addInstOptions(Inst::kOptionTaken); }
+ //! Condition is unlikely to be taken (a branch hint that only benefits the Pentium 4).
+ inline This& notTaken() noexcept { return _addInstOptions(Inst::kOptionNotTaken); }
+
+ //! Use LOCK prefix.
+ inline This& lock() noexcept { return _addInstOptions(Inst::kOptionLock); }
+ //! Use XACQUIRE prefix.
+ inline This& xacquire() noexcept { return _addInstOptions(Inst::kOptionXAcquire); }
+ //! Use XRELEASE prefix.
+ inline This& xrelease() noexcept { return _addInstOptions(Inst::kOptionXRelease); }
+
+ //! Use BND/REPNE prefix.
+ //!
+ //! \note This is the same as using the `repne()` or `repnz()` prefix.
+ inline This& bnd() noexcept { return _addInstOptions(Inst::kOptionRepne); }
+
+ //! Use REP/REPZ prefix.
+ //!
+ //! \note This is the same as using the `repe()` or `repz()` prefix.
+ inline This& rep(const Gp& zcx) noexcept {
+ _emitter()->_extraReg.init(zcx);
+ return _addInstOptions(Inst::kOptionRep);
+ }
+
+ //! Use REP/REPE prefix.
+ //!
+ //! \note This is the same as using the `rep()` or `repz()` prefix.
+ inline This& repe(const Gp& zcx) noexcept { return rep(zcx); }
+
+ //! Use REP/REPZ prefix.
+ //!
+ //! \note This is the same as using the `rep()` or `repe()` prefix.
+ inline This& repz(const Gp& zcx) noexcept { return rep(zcx); }
+
+ //! Use REPNE prefix.
+ //!
+ //! \note This is the same as using the `bnd()` or `repnz()` prefix.
+ inline This& repne(const Gp& zcx) noexcept {
+ _emitter()->_extraReg.init(zcx);
+ return _addInstOptions(Inst::kOptionRepne);
+ }
+
+ //! Use REPNE/REPNZ prefix.
+ //!
+ //! \note This is the same as using the `bnd()` or `repne()` prefix.
+ inline This& repnz(const Gp& zcx) noexcept { return repne(zcx); }
+
+ //! \}
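+
+ // A minimal sketch of prefix options (assumes a 64-bit `x86::Assembler a`;
+ // the operands are illustrative):
+ //
+ //   a.lock().add(x86::dword_ptr(x86::rdi), x86::eax); // lock add [rdi], eax
+ //   a.rep(x86::rcx).movs(                             // rep movsb
+ //       x86::byte_ptr(x86::rdi), x86::byte_ptr(x86::rsi));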
+
+ //! \name REX Options
+ //! \{
+
+ //! Force REX prefix to be emitted even when it's not needed (X64).
+ //!
+ //! \note Don't combine this option with high 8-bit registers (AH|BH|CH|DH),
+ //! as the REX prefix makes them inaccessible and `x86::Assembler` would fail
+ //! to encode such an instruction.
+ inline This& rex() noexcept { return _addInstOptions(Inst::kOptionRex); }
+
+ //! Force REX.B prefix (X64); provided for special purposes only.
+ inline This& rex_b() noexcept { return _addInstOptions(Inst::kOptionOpCodeB); }
+ //! Force REX.X prefix (X64); provided for special purposes only.
+ inline This& rex_x() noexcept { return _addInstOptions(Inst::kOptionOpCodeX); }
+ //! Force REX.R prefix (X64); provided for special purposes only.
+ inline This& rex_r() noexcept { return _addInstOptions(Inst::kOptionOpCodeR); }
+ //! Force REX.W prefix (X64); provided for special purposes only.
+ inline This& rex_w() noexcept { return _addInstOptions(Inst::kOptionOpCodeW); }
+
+ //! \}
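+
+ // A minimal sketch (assumes a 64-bit `x86::Assembler a`):
+ //
+ //   a.mov(x86::eax, x86::ebx);       // 8B C3    (no REX required).
+ //   a.rex().mov(x86::eax, x86::ebx); // 40 8B C3 (empty REX forced).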
+
+ //! \name VEX and EVEX Options
+ //! \{
+
+ //! Force 3-byte VEX prefix (AVX+).
+ inline This& vex3() noexcept { return _addInstOptions(Inst::kOptionVex3); }
+ //! Force 4-byte EVEX prefix (AVX512+).
+ inline This& evex() noexcept { return _addInstOptions(Inst::kOptionEvex); }
+
+ //! \}
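+
+ // A minimal sketch (assumes an AVX-512 capable target and an
+ // `x86::Assembler a`; the registers are illustrative):
+ //
+ //   a.vaddps(x86::xmm0, x86::xmm1, x86::xmm2);        // 2-byte VEX (C5).
+ //   a.vex3().vaddps(x86::xmm0, x86::xmm1, x86::xmm2); // 3-byte VEX (C4).
+ //   a.evex().vaddps(x86::xmm0, x86::xmm1, x86::xmm2); // EVEX      (62).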
+
+ //! \name AVX-512 Options & Masking
+ //! \{
+
+ //! Use masking {k} (AVX512+).
+ inline This& k(const KReg& kreg) noexcept {
+ _emitter()->_extraReg.init(kreg);
+ return *_emitter();
+ }
+
+ //! Use zeroing instead of merging (AVX512+).
+ inline This& z() noexcept { return _addInstOptions(Inst::kOptionZMask); }
+
+ //! Suppress all exceptions (AVX512+).
+ inline This& sae() noexcept { return _addInstOptions(Inst::kOptionSAE); }
+ //! Static rounding mode {rn} (round-to-nearest even) and {sae} (AVX512+).
+ inline This& rn_sae() noexcept { return _addInstOptions(Inst::kOptionER | Inst::kOptionRN_SAE); }
+ //! Static rounding mode {rd} (round-down, toward -inf) and {sae} (AVX512+).
+ inline This& rd_sae() noexcept { return _addInstOptions(Inst::kOptionER | Inst::kOptionRD_SAE); }
+ //! Static rounding mode {ru} (round-up, toward +inf) and {sae} (AVX512+).
+ inline This& ru_sae() noexcept { return _addInstOptions(Inst::kOptionER | Inst::kOptionRU_SAE); }
+ //! Static rounding mode {rz} (round-toward-zero, truncate) and {sae} (AVX512+).
+ inline This& rz_sae() noexcept { return _addInstOptions(Inst::kOptionER | Inst::kOptionRZ_SAE); }
+
+ //! \}
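+
+ // A minimal sketch of masking and static rounding (assumes AVX-512 and an
+ // `x86::Assembler a`; the registers are illustrative):
+ //
+ //   a.k(x86::k1).vaddpd(x86::zmm0, x86::zmm1, x86::zmm2);     // {k1} merge.
+ //   a.k(x86::k1).z().vaddpd(x86::zmm0, x86::zmm1, x86::zmm2); // {k1}{z}.
+ //   a.rn_sae().vaddpd(x86::zmm0, x86::zmm1, x86::zmm2);       // {rn-sae}.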
+
+ //! \name Base Instructions & GP Extensions
+ //! \{
+
+ ASMJIT_INST_2x(adc, Adc, Gp, Gp) // ANY
+ ASMJIT_INST_2x(adc, Adc, Gp, Mem) // ANY
+ ASMJIT_INST_2i(adc, Adc, Gp, Imm) // ANY
+ ASMJIT_INST_2x(adc, Adc, Mem, Gp) // ANY
+ ASMJIT_INST_2i(adc, Adc, Mem, Imm) // ANY
+ ASMJIT_INST_2x(add, Add, Gp, Gp) // ANY
+ ASMJIT_INST_2x(add, Add, Gp, Mem) // ANY
+ ASMJIT_INST_2i(add, Add, Gp, Imm) // ANY
+ ASMJIT_INST_2x(add, Add, Mem, Gp) // ANY
+ ASMJIT_INST_2i(add, Add, Mem, Imm) // ANY
+ ASMJIT_INST_2x(and_, And, Gp, Gp) // ANY
+ ASMJIT_INST_2x(and_, And, Gp, Mem) // ANY
+ ASMJIT_INST_2i(and_, And, Gp, Imm) // ANY
+ ASMJIT_INST_2x(and_, And, Mem, Gp) // ANY
+ ASMJIT_INST_2i(and_, And, Mem, Imm) // ANY
+ ASMJIT_INST_2x(arpl, Arpl, Gp, Gp) // X86
+ ASMJIT_INST_2x(arpl, Arpl, Mem, Gp) // X86
+ ASMJIT_INST_2x(bound, Bound, Gp, Mem) // X86
+ ASMJIT_INST_2x(bsf, Bsf, Gp, Gp) // ANY
+ ASMJIT_INST_2x(bsf, Bsf, Gp, Mem) // ANY
+ ASMJIT_INST_2x(bsr, Bsr, Gp, Gp) // ANY
+ ASMJIT_INST_2x(bsr, Bsr, Gp, Mem) // ANY
+ ASMJIT_INST_1x(bswap, Bswap, Gp) // ANY
+ ASMJIT_INST_2x(bt, Bt, Gp, Gp) // ANY
+ ASMJIT_INST_2i(bt, Bt, Gp, Imm) // ANY
+ ASMJIT_INST_2x(bt, Bt, Mem, Gp) // ANY
+ ASMJIT_INST_2i(bt, Bt, Mem, Imm) // ANY
+ ASMJIT_INST_2x(btc, Btc, Gp, Gp) // ANY
+ ASMJIT_INST_2i(btc, Btc, Gp, Imm) // ANY
+ ASMJIT_INST_2x(btc, Btc, Mem, Gp) // ANY
+ ASMJIT_INST_2i(btc, Btc, Mem, Imm) // ANY
+ ASMJIT_INST_2x(btr, Btr, Gp, Gp) // ANY
+ ASMJIT_INST_2i(btr, Btr, Gp, Imm) // ANY
+ ASMJIT_INST_2x(btr, Btr, Mem, Gp) // ANY
+ ASMJIT_INST_2i(btr, Btr, Mem, Imm) // ANY
+ ASMJIT_INST_2x(bts, Bts, Gp, Gp) // ANY
+ ASMJIT_INST_2i(bts, Bts, Gp, Imm) // ANY
+ ASMJIT_INST_2x(bts, Bts, Mem, Gp) // ANY
+ ASMJIT_INST_2i(bts, Bts, Mem, Imm) // ANY
+ ASMJIT_INST_1x(cbw, Cbw, AX) // ANY [EXPLICIT] AX <- Sign Extend AL
+ ASMJIT_INST_2x(cdq, Cdq, EDX, EAX) // ANY [EXPLICIT] EDX:EAX <- Sign Extend EAX
+ ASMJIT_INST_1x(cdqe, Cdqe, EAX) // X64 [EXPLICIT] RAX <- Sign Extend EAX
+ ASMJIT_INST_2x(cqo, Cqo, RDX, RAX) // X64 [EXPLICIT] RDX:RAX <- Sign Extend RAX
+ ASMJIT_INST_2x(cwd, Cwd, DX, AX) // ANY [EXPLICIT] DX:AX <- Sign Extend AX
+ ASMJIT_INST_1x(cwde, Cwde, EAX) // ANY [EXPLICIT] EAX <- Sign Extend AX
+ ASMJIT_INST_1x(call, Call, Gp) // ANY
+ ASMJIT_INST_1x(call, Call, Mem) // ANY
+ ASMJIT_INST_1x(call, Call, Label) // ANY
+ ASMJIT_INST_1i(call, Call, Imm) // ANY
+ ASMJIT_INST_0x(clc, Clc) // ANY
+ ASMJIT_INST_0x(cld, Cld) // ANY
+ ASMJIT_INST_0x(cli, Cli) // ANY
+ ASMJIT_INST_0x(clts, Clts) // ANY
+ ASMJIT_INST_0x(cmc, Cmc) // ANY
+ ASMJIT_INST_2c(cmov, Cmov, Condition::toCmovcc, Gp, Gp) // CMOV
+ ASMJIT_INST_2c(cmov, Cmov, Condition::toCmovcc, Gp, Mem) // CMOV
+ ASMJIT_INST_2x(cmp, Cmp, Gp, Gp) // ANY
+ ASMJIT_INST_2x(cmp, Cmp, Gp, Mem) // ANY
+ ASMJIT_INST_2i(cmp, Cmp, Gp, Imm) // ANY
+ ASMJIT_INST_2x(cmp, Cmp, Mem, Gp) // ANY
+ ASMJIT_INST_2i(cmp, Cmp, Mem, Imm) // ANY
+ ASMJIT_INST_2x(cmps, Cmps, DS_ZSI, ES_ZDI) // ANY [EXPLICIT]
+ ASMJIT_INST_3x(cmpxchg, Cmpxchg, Gp, Gp, ZAX) // I486 [EXPLICIT]
+ ASMJIT_INST_3x(cmpxchg, Cmpxchg, Mem, Gp, ZAX) // I486 [EXPLICIT]
+ ASMJIT_INST_5x(cmpxchg16b, Cmpxchg16b, Mem, RDX, RAX, RCX, RBX) // CMPXCHG16B [EXPLICIT] m == RDX:RAX ? m <- RCX:RBX
+ ASMJIT_INST_5x(cmpxchg8b, Cmpxchg8b, Mem, EDX, EAX, ECX, EBX) // CMPXCHG8B [EXPLICIT] m == EDX:EAX ? m <- ECX:EBX
+ ASMJIT_INST_4x(cpuid, Cpuid, EAX, EBX, ECX, EDX) // I486 [EXPLICIT] EAX:EBX:ECX:EDX <- CPUID[EAX:ECX]
+ ASMJIT_INST_1x(daa, Daa, Gp) // X86 [EXPLICIT]
+ ASMJIT_INST_1x(das, Das, Gp) // X86 [EXPLICIT]
+ ASMJIT_INST_1x(dec, Dec, Gp) // ANY
+ ASMJIT_INST_1x(dec, Dec, Mem) // ANY
+ ASMJIT_INST_2x(div, Div, Gp, Gp) // ANY [EXPLICIT] AH[Rem]:AL[Quot] <- AX / r8
+ ASMJIT_INST_2x(div, Div, Gp, Mem) // ANY [EXPLICIT] AH[Rem]:AL[Quot] <- AX / m8
+ ASMJIT_INST_3x(div, Div, Gp, Gp, Gp) // ANY [EXPLICIT] xDX[Rem]:xAX[Quot] <- xDX:xAX / r16|r32|r64
+ ASMJIT_INST_3x(div, Div, Gp, Gp, Mem) // ANY [EXPLICIT] xDX[Rem]:xAX[Quot] <- xDX:xAX / m16|m32|m64
+ ASMJIT_INST_0x(emms, Emms) // MMX
+ ASMJIT_INST_2x(enter, Enter, Imm, Imm) // ANY
+ ASMJIT_INST_0x(hlt, Hlt) // ANY
+ ASMJIT_INST_2x(idiv, Idiv, Gp, Gp) // ANY [EXPLICIT] AH[Rem]:AL[Quot] <- AX / r8
+ ASMJIT_INST_2x(idiv, Idiv, Gp, Mem) // ANY [EXPLICIT] AH[Rem]:AL[Quot] <- AX / m8
+ ASMJIT_INST_3x(idiv, Idiv, Gp, Gp, Gp) // ANY [EXPLICIT] xDX[Rem]:xAX[Quot] <- xDX:xAX / r16|r32|r64
+ ASMJIT_INST_3x(idiv, Idiv, Gp, Gp, Mem) // ANY [EXPLICIT] xDX[Rem]:xAX[Quot] <- xDX:xAX / m16|m32|m64
+ ASMJIT_INST_2x(imul, Imul, Gp, Gp) // ANY [EXPLICIT] AX <- AL * r8 | ra <- ra * rb
+ ASMJIT_INST_2x(imul, Imul, Gp, Mem) // ANY [EXPLICIT] AX <- AL * m8 | ra <- ra * m16|m32|m64
+ ASMJIT_INST_2i(imul, Imul, Gp, Imm) // ANY
+ ASMJIT_INST_3i(imul, Imul, Gp, Gp, Imm) // ANY
+ ASMJIT_INST_3i(imul, Imul, Gp, Mem, Imm) // ANY
+ ASMJIT_INST_3x(imul, Imul, Gp, Gp, Gp) // ANY [EXPLICIT] xDX:xAX <- xAX * r16|r32|r64
+ ASMJIT_INST_3x(imul, Imul, Gp, Gp, Mem) // ANY [EXPLICIT] xDX:xAX <- xAX * m16|m32|m64
+ ASMJIT_INST_2i(in, In, ZAX, Imm) // ANY
+ ASMJIT_INST_2x(in, In, ZAX, DX) // ANY
+ ASMJIT_INST_1x(inc, Inc, Gp) // ANY
+ ASMJIT_INST_1x(inc, Inc, Mem) // ANY
+ ASMJIT_INST_2x(ins, Ins, ES_ZDI, DX) // ANY [EXPLICIT]
+ ASMJIT_INST_1i(int_, Int, Imm) // ANY
+ ASMJIT_INST_0x(int3, Int3) // ANY
+ ASMJIT_INST_0x(into, Into) // ANY
+ ASMJIT_INST_0x(invd, Invd) // ANY
+ ASMJIT_INST_1x(invlpg, Invlpg, Mem) // ANY
+ ASMJIT_INST_2x(invpcid, Invpcid, Gp, Mem) // ANY
+ ASMJIT_INST_1c(j, J, Condition::toJcc, Label) // ANY
+ ASMJIT_INST_1c(j, J, Condition::toJcc, Imm) // ANY
+ ASMJIT_INST_1c(j, J, Condition::toJcc, uint64_t) // ANY
+ ASMJIT_INST_2x(jecxz, Jecxz, Gp, Label) // ANY [EXPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_2x(jecxz, Jecxz, Gp, Imm) // ANY [EXPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_2x(jecxz, Jecxz, Gp, uint64_t) // ANY [EXPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_1x(jmp, Jmp, Gp) // ANY
+ ASMJIT_INST_1x(jmp, Jmp, Mem) // ANY
+ ASMJIT_INST_1x(jmp, Jmp, Label) // ANY
+ ASMJIT_INST_1x(jmp, Jmp, Imm) // ANY
+ ASMJIT_INST_1x(jmp, Jmp, uint64_t) // ANY
+ ASMJIT_INST_1x(lahf, Lahf, AH) // LAHFSAHF [EXPLICIT] AH <- EFL
+ ASMJIT_INST_2x(lar, Lar, Gp, Gp) // ANY
+ ASMJIT_INST_2x(lar, Lar, Gp, Mem) // ANY
+ ASMJIT_INST_1x(ldmxcsr, Ldmxcsr, Mem) // SSE
+ ASMJIT_INST_2x(lds, Lds, Gp, Mem) // X86
+ ASMJIT_INST_2x(lea, Lea, Gp, Mem) // ANY
+ ASMJIT_INST_0x(leave, Leave) // ANY
+ ASMJIT_INST_2x(les, Les, Gp, Mem) // X86
+ ASMJIT_INST_0x(lfence, Lfence) // SSE2
+ ASMJIT_INST_2x(lfs, Lfs, Gp, Mem) // ANY
+ ASMJIT_INST_1x(lgdt, Lgdt, Mem) // ANY
+ ASMJIT_INST_2x(lgs, Lgs, Gp, Mem) // ANY
+ ASMJIT_INST_1x(lidt, Lidt, Mem) // ANY
+ ASMJIT_INST_1x(lldt, Lldt, Gp) // ANY
+ ASMJIT_INST_1x(lldt, Lldt, Mem) // ANY
+ ASMJIT_INST_1x(lmsw, Lmsw, Gp) // ANY
+ ASMJIT_INST_1x(lmsw, Lmsw, Mem) // ANY
+ ASMJIT_INST_2x(lods, Lods, ZAX, DS_ZSI) // ANY [EXPLICIT]
+ ASMJIT_INST_2x(loop, Loop, ZCX, Label) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_2x(loop, Loop, ZCX, Imm) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_2x(loop, Loop, ZCX, uint64_t) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_2x(loope, Loope, ZCX, Label) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_2x(loope, Loope, ZCX, Imm) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_2x(loope, Loope, ZCX, uint64_t) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_2x(loopne, Loopne, ZCX, Label) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_2x(loopne, Loopne, ZCX, Imm) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_2x(loopne, Loopne, ZCX, uint64_t) // ANY [EXPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_2x(lsl, Lsl, Gp, Gp) // ANY
+ ASMJIT_INST_2x(lsl, Lsl, Gp, Mem) // ANY
+ ASMJIT_INST_2x(lss, Lss, Gp, Mem) // ANY
+ ASMJIT_INST_1x(ltr, Ltr, Gp) // ANY
+ ASMJIT_INST_1x(ltr, Ltr, Mem) // ANY
+ ASMJIT_INST_0x(mfence, Mfence) // SSE2
+ ASMJIT_INST_2x(mov, Mov, Gp, Gp) // ANY
+ ASMJIT_INST_2x(mov, Mov, Gp, Mem) // ANY
+ ASMJIT_INST_2i(mov, Mov, Gp, Imm) // ANY
+ ASMJIT_INST_2x(mov, Mov, Mem, Gp) // ANY
+ ASMJIT_INST_2i(mov, Mov, Mem, Imm) // ANY
+ ASMJIT_INST_2x(mov, Mov, Gp, CReg) // ANY
+ ASMJIT_INST_2x(mov, Mov, CReg, Gp) // ANY
+ ASMJIT_INST_2x(mov, Mov, Gp, DReg) // ANY
+ ASMJIT_INST_2x(mov, Mov, DReg, Gp) // ANY
+ ASMJIT_INST_2x(mov, Mov, Gp, SReg) // ANY
+ ASMJIT_INST_2x(mov, Mov, Mem, SReg) // ANY
+ ASMJIT_INST_2x(mov, Mov, SReg, Gp) // ANY
+ ASMJIT_INST_2x(mov, Mov, SReg, Mem) // ANY
+ ASMJIT_INST_2x(movnti, Movnti, Mem, Gp) // SSE2
+ ASMJIT_INST_2x(movs, Movs, ES_ZDI, DS_ZSI) // ANY [EXPLICIT]
+ ASMJIT_INST_2x(movsx, Movsx, Gp, Gp) // ANY
+ ASMJIT_INST_2x(movsx, Movsx, Gp, Mem) // ANY
+ ASMJIT_INST_2x(movsxd, Movsxd, Gp, Gp) // X64
+ ASMJIT_INST_2x(movsxd, Movsxd, Gp, Mem) // X64
+ ASMJIT_INST_2x(movzx, Movzx, Gp, Gp) // ANY
+ ASMJIT_INST_2x(movzx, Movzx, Gp, Mem) // ANY
+ ASMJIT_INST_2x(mul, Mul, AX, Gp) // ANY [EXPLICIT] AX <- AL * r8
+ ASMJIT_INST_2x(mul, Mul, AX, Mem) // ANY [EXPLICIT] AX <- AL * m8
+ ASMJIT_INST_3x(mul, Mul, ZDX, ZAX, Gp) // ANY [EXPLICIT] xDX:xAX <- xAX * r16|r32|r64
+ ASMJIT_INST_3x(mul, Mul, ZDX, ZAX, Mem) // ANY [EXPLICIT] xDX:xAX <- xAX * m16|m32|m64
+ ASMJIT_INST_1x(neg, Neg, Gp) // ANY
+ ASMJIT_INST_1x(neg, Neg, Mem) // ANY
+ ASMJIT_INST_0x(nop, Nop) // ANY
+ ASMJIT_INST_1x(nop, Nop, Gp) // ANY
+ ASMJIT_INST_1x(nop, Nop, Mem) // ANY
+ ASMJIT_INST_1x(not_, Not, Gp) // ANY
+ ASMJIT_INST_1x(not_, Not, Mem) // ANY
+ ASMJIT_INST_2x(or_, Or, Gp, Gp) // ANY
+ ASMJIT_INST_2x(or_, Or, Gp, Mem) // ANY
+ ASMJIT_INST_2i(or_, Or, Gp, Imm) // ANY
+ ASMJIT_INST_2x(or_, Or, Mem, Gp) // ANY
+ ASMJIT_INST_2i(or_, Or, Mem, Imm) // ANY
+ ASMJIT_INST_2x(out, Out, Imm, ZAX) // ANY
+ ASMJIT_INST_2x(out, Out, DX, ZAX) // ANY
+ ASMJIT_INST_2x(outs, Outs, DX, DS_ZSI) // ANY [EXPLICIT]
+ ASMJIT_INST_0x(pause, Pause) // SSE2
+ ASMJIT_INST_1x(pop, Pop, Gp) // ANY
+ ASMJIT_INST_1x(pop, Pop, Mem) // ANY
+ ASMJIT_INST_1x(pop, Pop, SReg) // ANY
+ ASMJIT_INST_0x(popa, Popa) // X86
+ ASMJIT_INST_0x(popad, Popad) // X86
+ ASMJIT_INST_0x(popf, Popf) // ANY
+ ASMJIT_INST_0x(popfd, Popfd) // X86
+ ASMJIT_INST_0x(popfq, Popfq) // X64
+ ASMJIT_INST_1x(prefetch, Prefetch, Mem) // 3DNOW
+ ASMJIT_INST_1x(prefetchnta, Prefetchnta, Mem) // SSE
+ ASMJIT_INST_1x(prefetcht0, Prefetcht0, Mem) // SSE
+ ASMJIT_INST_1x(prefetcht1, Prefetcht1, Mem) // SSE
+ ASMJIT_INST_1x(prefetcht2, Prefetcht2, Mem) // SSE
+ ASMJIT_INST_1x(prefetchw, Prefetchw, Mem) // PREFETCHW
+ ASMJIT_INST_1x(prefetchwt1, Prefetchwt1, Mem) // PREFETCHWT1
+ ASMJIT_INST_1x(push, Push, Gp) // ANY
+ ASMJIT_INST_1x(push, Push, Mem) // ANY
+ ASMJIT_INST_1x(push, Push, SReg) // ANY
+ ASMJIT_INST_1i(push, Push, Imm) // ANY
+ ASMJIT_INST_0x(pusha, Pusha) // X86
+ ASMJIT_INST_0x(pushad, Pushad) // X86
+ ASMJIT_INST_0x(pushf, Pushf) // ANY
+ ASMJIT_INST_0x(pushfd, Pushfd) // X86
+ ASMJIT_INST_0x(pushfq, Pushfq) // X64
+ ASMJIT_INST_2x(rcl, Rcl, Gp, CL) // ANY
+ ASMJIT_INST_2x(rcl, Rcl, Mem, CL) // ANY
+ ASMJIT_INST_2i(rcl, Rcl, Gp, Imm) // ANY
+ ASMJIT_INST_2i(rcl, Rcl, Mem, Imm) // ANY
+ ASMJIT_INST_2x(rcr, Rcr, Gp, CL) // ANY
+ ASMJIT_INST_2x(rcr, Rcr, Mem, CL) // ANY
+ ASMJIT_INST_2i(rcr, Rcr, Gp, Imm) // ANY
+ ASMJIT_INST_2i(rcr, Rcr, Mem, Imm) // ANY
+ ASMJIT_INST_3x(rdmsr, Rdmsr, EDX, EAX, ECX) // MSR [EXPLICIT] EDX:EAX <- MSR[ECX]
+ ASMJIT_INST_3x(rdpmc, Rdpmc, EDX, EAX, ECX) // ANY [EXPLICIT] EDX:EAX <- PMC[ECX]
+ ASMJIT_INST_2x(rdtsc, Rdtsc, EDX, EAX) // RDTSC [EXPLICIT] EDX:EAX <- Counter
+ ASMJIT_INST_3x(rdtscp, Rdtscp, EDX, EAX, ECX) // RDTSCP [EXPLICIT] EDX:EAX:ECX <- Counter
+ ASMJIT_INST_2x(rol, Rol, Gp, CL) // ANY
+ ASMJIT_INST_2x(rol, Rol, Mem, CL) // ANY
+ ASMJIT_INST_2i(rol, Rol, Gp, Imm) // ANY
+ ASMJIT_INST_2i(rol, Rol, Mem, Imm) // ANY
+ ASMJIT_INST_2x(ror, Ror, Gp, CL) // ANY
+ ASMJIT_INST_2x(ror, Ror, Mem, CL) // ANY
+ ASMJIT_INST_2i(ror, Ror, Gp, Imm) // ANY
+ ASMJIT_INST_2i(ror, Ror, Mem, Imm) // ANY
+ ASMJIT_INST_0x(rsm, Rsm) // X86
+ ASMJIT_INST_2x(sbb, Sbb, Gp, Gp) // ANY
+ ASMJIT_INST_2x(sbb, Sbb, Gp, Mem) // ANY
+ ASMJIT_INST_2i(sbb, Sbb, Gp, Imm) // ANY
+ ASMJIT_INST_2x(sbb, Sbb, Mem, Gp) // ANY
+ ASMJIT_INST_2i(sbb, Sbb, Mem, Imm) // ANY
+ ASMJIT_INST_1x(sahf, Sahf, AH) // LAHFSAHF [EXPLICIT] EFL <- AH
+ ASMJIT_INST_2x(sal, Sal, Gp, CL) // ANY
+ ASMJIT_INST_2x(sal, Sal, Mem, CL) // ANY
+ ASMJIT_INST_2i(sal, Sal, Gp, Imm) // ANY
+ ASMJIT_INST_2i(sal, Sal, Mem, Imm) // ANY
+ ASMJIT_INST_2x(sar, Sar, Gp, CL) // ANY
+ ASMJIT_INST_2x(sar, Sar, Mem, CL) // ANY
+ ASMJIT_INST_2i(sar, Sar, Gp, Imm) // ANY
+ ASMJIT_INST_2i(sar, Sar, Mem, Imm) // ANY
+ ASMJIT_INST_2x(scas, Scas, ZAX, ES_ZDI) // ANY [EXPLICIT]
+ ASMJIT_INST_1c(set, Set, Condition::toSetcc, Gp) // ANY
+ ASMJIT_INST_1c(set, Set, Condition::toSetcc, Mem) // ANY
+ ASMJIT_INST_0x(sfence, Sfence) // SSE
+ ASMJIT_INST_1x(sgdt, Sgdt, Mem) // ANY
+ ASMJIT_INST_2x(shl, Shl, Gp, CL) // ANY
+ ASMJIT_INST_2x(shl, Shl, Mem, CL) // ANY
+ ASMJIT_INST_2i(shl, Shl, Gp, Imm) // ANY
+ ASMJIT_INST_2i(shl, Shl, Mem, Imm) // ANY
+ ASMJIT_INST_2x(shr, Shr, Gp, CL) // ANY
+ ASMJIT_INST_2x(shr, Shr, Mem, CL) // ANY
+ ASMJIT_INST_2i(shr, Shr, Gp, Imm) // ANY
+ ASMJIT_INST_2i(shr, Shr, Mem, Imm) // ANY
+ ASMJIT_INST_3x(shld, Shld, Gp, Gp, CL) // ANY
+ ASMJIT_INST_3x(shld, Shld, Mem, Gp, CL) // ANY
+ ASMJIT_INST_3i(shld, Shld, Gp, Gp, Imm) // ANY
+ ASMJIT_INST_3i(shld, Shld, Mem, Gp, Imm) // ANY
+ ASMJIT_INST_3x(shrd, Shrd, Gp, Gp, CL) // ANY
+ ASMJIT_INST_3x(shrd, Shrd, Mem, Gp, CL) // ANY
+ ASMJIT_INST_3i(shrd, Shrd, Gp, Gp, Imm) // ANY
+ ASMJIT_INST_3i(shrd, Shrd, Mem, Gp, Imm) // ANY
+ ASMJIT_INST_1x(sidt, Sidt, Mem) // ANY
+ ASMJIT_INST_1x(sldt, Sldt, Gp) // ANY
+ ASMJIT_INST_1x(sldt, Sldt, Mem) // ANY
+ ASMJIT_INST_1x(smsw, Smsw, Gp) // ANY
+ ASMJIT_INST_1x(smsw, Smsw, Mem) // ANY
+ ASMJIT_INST_0x(stc, Stc) // ANY
+ ASMJIT_INST_0x(std, Std) // ANY
+ ASMJIT_INST_0x(sti, Sti) // ANY
+ ASMJIT_INST_1x(stmxcsr, Stmxcsr, Mem) // SSE
+ ASMJIT_INST_2x(stos, Stos, ES_ZDI, ZAX) // ANY [EXPLICIT]
+ ASMJIT_INST_1x(str, Str, Gp) // ANY
+ ASMJIT_INST_1x(str, Str, Mem) // ANY
+ ASMJIT_INST_2x(sub, Sub, Gp, Gp) // ANY
+ ASMJIT_INST_2x(sub, Sub, Gp, Mem) // ANY
+ ASMJIT_INST_2i(sub, Sub, Gp, Imm) // ANY
+ ASMJIT_INST_2x(sub, Sub, Mem, Gp) // ANY
+ ASMJIT_INST_2i(sub, Sub, Mem, Imm) // ANY
+ ASMJIT_INST_0x(swapgs, Swapgs) // X64
+ ASMJIT_INST_2x(test, Test, Gp, Gp) // ANY
+ ASMJIT_INST_2i(test, Test, Gp, Imm) // ANY
+ ASMJIT_INST_2x(test, Test, Mem, Gp) // ANY
+ ASMJIT_INST_2i(test, Test, Mem, Imm) // ANY
+ ASMJIT_INST_0x(ud2, Ud2) // ANY
+ ASMJIT_INST_1x(verr, Verr, Gp) // ANY
+ ASMJIT_INST_1x(verr, Verr, Mem) // ANY
+ ASMJIT_INST_1x(verw, Verw, Gp) // ANY
+ ASMJIT_INST_1x(verw, Verw, Mem) // ANY
+ ASMJIT_INST_3x(wrmsr, Wrmsr, EDX, EAX, ECX) // MSR [EXPLICIT] EDX:EAX -> MSR[ECX]
+ ASMJIT_INST_2x(xadd, Xadd, Gp, Gp) // ANY
+ ASMJIT_INST_2x(xadd, Xadd, Mem, Gp) // ANY
+ ASMJIT_INST_2x(xchg, Xchg, Gp, Gp) // ANY
+ ASMJIT_INST_2x(xchg, Xchg, Mem, Gp) // ANY
+ ASMJIT_INST_2x(xchg, Xchg, Gp, Mem) // ANY
+ ASMJIT_INST_2x(xor_, Xor, Gp, Gp) // ANY
+ ASMJIT_INST_2x(xor_, Xor, Gp, Mem) // ANY
+ ASMJIT_INST_2i(xor_, Xor, Gp, Imm) // ANY
+ ASMJIT_INST_2x(xor_, Xor, Mem, Gp) // ANY
+ ASMJIT_INST_2i(xor_, Xor, Mem, Imm) // ANY
+
+ //! \}
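+
+ // Rows tagged [EXPLICIT] list implicit registers that this API requires to
+ // be passed explicitly. A minimal sketch (assumes a 64-bit `x86::Assembler a`;
+ // the divisor register is illustrative):
+ //
+ //   a.cdq(x86::edx, x86::eax);           // cdq     - EDX:EAX are implicit.
+ //   a.div(x86::edx, x86::eax, x86::ecx); // div ecx - EDX:EAX are implicit.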
+
+ //! \name ADX Instructions
+ //! \{
+
+ ASMJIT_INST_2x(adcx, Adcx, Gp, Gp) // ADX
+ ASMJIT_INST_2x(adcx, Adcx, Gp, Mem) // ADX
+ ASMJIT_INST_2x(adox, Adox, Gp, Gp) // ADX
+ ASMJIT_INST_2x(adox, Adox, Gp, Mem) // ADX
+
+ //! \}
+
+ //! \name BMI Instructions
+ //! \{
+
+ ASMJIT_INST_3x(andn, Andn, Gp, Gp, Gp) // BMI
+ ASMJIT_INST_3x(andn, Andn, Gp, Gp, Mem) // BMI
+ ASMJIT_INST_3x(bextr, Bextr, Gp, Gp, Gp) // BMI
+ ASMJIT_INST_3x(bextr, Bextr, Gp, Mem, Gp) // BMI
+ ASMJIT_INST_2x(blsi, Blsi, Gp, Gp) // BMI
+ ASMJIT_INST_2x(blsi, Blsi, Gp, Mem) // BMI
+ ASMJIT_INST_2x(blsmsk, Blsmsk, Gp, Gp) // BMI
+ ASMJIT_INST_2x(blsmsk, Blsmsk, Gp, Mem) // BMI
+ ASMJIT_INST_2x(blsr, Blsr, Gp, Gp) // BMI
+ ASMJIT_INST_2x(blsr, Blsr, Gp, Mem) // BMI
+ ASMJIT_INST_2x(tzcnt, Tzcnt, Gp, Gp) // BMI
+ ASMJIT_INST_2x(tzcnt, Tzcnt, Gp, Mem) // BMI
+
+ //! \}
+
+ //! \name BMI2 Instructions
+ //! \{
+
+ ASMJIT_INST_3x(bzhi, Bzhi, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(bzhi, Bzhi, Gp, Mem, Gp) // BMI2
+ ASMJIT_INST_4x(mulx, Mulx, Gp, Gp, Gp, ZDX) // BMI2 [EXPLICIT]
+ ASMJIT_INST_4x(mulx, Mulx, Gp, Gp, Mem, ZDX) // BMI2 [EXPLICIT]
+ ASMJIT_INST_3x(pdep, Pdep, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(pdep, Pdep, Gp, Gp, Mem) // BMI2
+ ASMJIT_INST_3x(pext, Pext, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(pext, Pext, Gp, Gp, Mem) // BMI2
+ ASMJIT_INST_3i(rorx, Rorx, Gp, Gp, Imm) // BMI2
+ ASMJIT_INST_3i(rorx, Rorx, Gp, Mem, Imm) // BMI2
+ ASMJIT_INST_3x(sarx, Sarx, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(sarx, Sarx, Gp, Mem, Gp) // BMI2
+ ASMJIT_INST_3x(shlx, Shlx, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(shlx, Shlx, Gp, Mem, Gp) // BMI2
+ ASMJIT_INST_3x(shrx, Shrx, Gp, Gp, Gp) // BMI2
+ ASMJIT_INST_3x(shrx, Shrx, Gp, Mem, Gp) // BMI2
+
+ //! \}
+
+ //! \name CL Instructions
+ //! \{
+
+ ASMJIT_INST_1x(cldemote, Cldemote, Mem) // CLDEMOTE
+ ASMJIT_INST_1x(clflush, Clflush, Mem) // CLFLUSH
+ ASMJIT_INST_1x(clflushopt, Clflushopt, Mem) // CLFLUSHOPT
+ ASMJIT_INST_1x(clwb, Clwb, Mem) // CLWB
+ ASMJIT_INST_1x(clzero, Clzero, DS_ZAX) // CLZERO [EXPLICIT]
+ ASMJIT_INST_0x(wbnoinvd, Wbnoinvd) // WBNOINVD
+
+ //! \}
+
+ //! \name CRC32 Instructions
+ //! \{
+
+ ASMJIT_INST_2x(crc32, Crc32, Gp, Gp) // SSE4_2
+ ASMJIT_INST_2x(crc32, Crc32, Gp, Mem) // SSE4_2
+
+ //! \}
+
+ //! \name ENQCMD Instructions
+ //! \{
+
+ ASMJIT_INST_2x(enqcmd, Enqcmd, Mem, Mem) // ENQCMD
+ ASMJIT_INST_2x(enqcmds, Enqcmds, Mem, Mem) // ENQCMD
+
+ //! \}
+
+ //! \name FSGSBASE Instructions
+ //! \{
+
+ ASMJIT_INST_1x(rdfsbase, Rdfsbase, Gp) // FSGSBASE
+ ASMJIT_INST_1x(rdgsbase, Rdgsbase, Gp) // FSGSBASE
+ ASMJIT_INST_1x(wrfsbase, Wrfsbase, Gp) // FSGSBASE
+ ASMJIT_INST_1x(wrgsbase, Wrgsbase, Gp) // FSGSBASE
+
+ //! \}
+
+ //! \name FXSR & XSAVE Instructions
+ //! \{
+
+ ASMJIT_INST_1x(fxrstor, Fxrstor, Mem) // FXSR
+ ASMJIT_INST_1x(fxrstor64, Fxrstor64, Mem) // FXSR
+ ASMJIT_INST_1x(fxsave, Fxsave, Mem) // FXSR
+ ASMJIT_INST_1x(fxsave64, Fxsave64, Mem) // FXSR
+ ASMJIT_INST_3x(xgetbv, Xgetbv, EDX, EAX, ECX) // XSAVE [EXPLICIT] EDX:EAX <- XCR[ECX]
+ ASMJIT_INST_3x(xsetbv, Xsetbv, EDX, EAX, ECX) // XSAVE [EXPLICIT] XCR[ECX] <- EDX:EAX
+
+ //! \}
+
+ //! \name LWP Instructions
+ //! \{
+
+ ASMJIT_INST_1x(llwpcb, Llwpcb, Gp) // LWP
+ ASMJIT_INST_3i(lwpins, Lwpins, Gp, Gp, Imm) // LWP
+ ASMJIT_INST_3i(lwpins, Lwpins, Gp, Mem, Imm) // LWP
+ ASMJIT_INST_3i(lwpval, Lwpval, Gp, Gp, Imm) // LWP
+ ASMJIT_INST_3i(lwpval, Lwpval, Gp, Mem, Imm) // LWP
+ ASMJIT_INST_1x(slwpcb, Slwpcb, Gp) // LWP
+
+ //! \}
+
+ //! \name LZCNT Instructions
+ //! \{
+
+ ASMJIT_INST_2x(lzcnt, Lzcnt, Gp, Gp) // LZCNT
+ ASMJIT_INST_2x(lzcnt, Lzcnt, Gp, Mem) // LZCNT
+
+ //! \}
+
+ //! \name MOVBE Instructions
+ //! \{
+
+ ASMJIT_INST_2x(movbe, Movbe, Gp, Mem) // MOVBE
+ ASMJIT_INST_2x(movbe, Movbe, Mem, Gp) // MOVBE
+
+ //! \}
+
+ //! \name MOVDIRI & MOVDIR64B Instructions
+ //! \{
+
+ ASMJIT_INST_2x(movdiri, Movdiri, Mem, Gp) // MOVDIRI
+ ASMJIT_INST_2x(movdir64b, Movdir64b, Mem, Mem) // MOVDIR64B
+
+ //! \}
+
+ //! \name MPX Extensions
+ //! \{
+
+ ASMJIT_INST_2x(bndcl, Bndcl, Bnd, Gp) // MPX
+ ASMJIT_INST_2x(bndcl, Bndcl, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndcn, Bndcn, Bnd, Gp) // MPX
+ ASMJIT_INST_2x(bndcn, Bndcn, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndcu, Bndcu, Bnd, Gp) // MPX
+ ASMJIT_INST_2x(bndcu, Bndcu, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndldx, Bndldx, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndmk, Bndmk, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndmov, Bndmov, Bnd, Bnd) // MPX
+ ASMJIT_INST_2x(bndmov, Bndmov, Bnd, Mem) // MPX
+ ASMJIT_INST_2x(bndmov, Bndmov, Mem, Bnd) // MPX
+ ASMJIT_INST_2x(bndstx, Bndstx, Mem, Bnd) // MPX
+
+ //! \}
+
+ //! \name POPCNT Instructions
+ //! \{
+
+ ASMJIT_INST_2x(popcnt, Popcnt, Gp, Gp) // POPCNT
+ ASMJIT_INST_2x(popcnt, Popcnt, Gp, Mem) // POPCNT
+
+ //! \}
+
+ //! \name RDRAND & RDSEED Instructions
+ //! \{
+
+ ASMJIT_INST_1x(rdrand, Rdrand, Gp) // RDRAND
+ ASMJIT_INST_1x(rdseed, Rdseed, Gp) // RDSEED
+
+ //! \}
+
+ //! \name RTM & TSX Instructions
+ //! \{
+
+ ASMJIT_INST_1i(xabort, Xabort, Imm) // RTM
+ ASMJIT_INST_1x(xbegin, Xbegin, Label) // RTM
+ ASMJIT_INST_1x(xbegin, Xbegin, Imm) // RTM
+ ASMJIT_INST_1x(xbegin, Xbegin, uint64_t) // RTM
+ ASMJIT_INST_0x(xend, Xend) // RTM
+ ASMJIT_INST_0x(xtest, Xtest) // TSX
+
+ //! \}
+
+ //! \name SMAP Instructions
+ //! \{
+
+ ASMJIT_INST_0x(clac, Clac) // SMAP
+ ASMJIT_INST_0x(stac, Stac) // SMAP
+
+ //! \}
+
+ //! \name SVM Instructions
+ //! \{
+
+ ASMJIT_INST_0x(clgi, Clgi) // SVM
+ ASMJIT_INST_2x(invlpga, Invlpga, Gp, Gp) // SVM [EXPLICIT] <eax|rax, ecx>
+ ASMJIT_INST_1x(skinit, Skinit, Gp) // SKINIT [EXPLICIT] <eax>
+ ASMJIT_INST_0x(stgi, Stgi) // SKINIT
+ ASMJIT_INST_1x(vmload, Vmload, Gp) // SVM [EXPLICIT] <zax>
+ ASMJIT_INST_0x(vmmcall, Vmmcall) // SVM
+ ASMJIT_INST_1x(vmrun, Vmrun, Gp) // SVM [EXPLICIT] <zax>
+ ASMJIT_INST_1x(vmsave, Vmsave, Gp) // SVM [EXPLICIT] <zax>
+
+ //! \}
+
+ //! \name TBM Instructions
+ //! \{
+
+ ASMJIT_INST_2x(blcfill, Blcfill, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blcfill, Blcfill, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blci, Blci, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blci, Blci, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blcic, Blcic, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blcic, Blcic, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blcmsk, Blcmsk, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blcmsk, Blcmsk, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blcs, Blcs, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blcs, Blcs, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blsfill, Blsfill, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blsfill, Blsfill, Gp, Mem) // TBM
+ ASMJIT_INST_2x(blsic, Blsic, Gp, Gp) // TBM
+ ASMJIT_INST_2x(blsic, Blsic, Gp, Mem) // TBM
+ ASMJIT_INST_2x(t1mskc, T1mskc, Gp, Gp) // TBM
+ ASMJIT_INST_2x(t1mskc, T1mskc, Gp, Mem) // TBM
+ ASMJIT_INST_2x(tzmsk, Tzmsk, Gp, Gp) // TBM
+ ASMJIT_INST_2x(tzmsk, Tzmsk, Gp, Mem) // TBM
+
+ //! \}
+
+ //! \name VMX Instructions
+ //! \{
+
+ ASMJIT_INST_2x(invept, Invept, Gp, Mem) // VMX
+ ASMJIT_INST_2x(invvpid, Invvpid, Gp, Mem) // VMX
+ ASMJIT_INST_0x(vmcall, Vmcall) // VMX
+ ASMJIT_INST_1x(vmclear, Vmclear, Mem) // VMX
+ ASMJIT_INST_0x(vmfunc, Vmfunc) // VMX
+ ASMJIT_INST_0x(vmlaunch, Vmlaunch) // VMX
+ ASMJIT_INST_1x(vmptrld, Vmptrld, Mem) // VMX
+ ASMJIT_INST_1x(vmptrst, Vmptrst, Mem) // VMX
+ ASMJIT_INST_2x(vmread, Vmread, Mem, Gp) // VMX
+ ASMJIT_INST_0x(vmresume, Vmresume) // VMX
+ ASMJIT_INST_2x(vmwrite, Vmwrite, Gp, Mem) // VMX
+ ASMJIT_INST_1x(vmxon, Vmxon, Mem) // VMX
+
+ //! \}
+
+ //! \name Other GP Instructions
+ //! \{
+
+ ASMJIT_INST_0x(getsec, Getsec) // SMX
+ ASMJIT_INST_0x(pcommit, Pcommit) // PCOMMIT
+ ASMJIT_INST_1x(rdpid, Rdpid, Gp) // RDPID
+
+ //! \}
+
+ //! \name FPU Instructions
+ //! \{
+
+ ASMJIT_INST_0x(f2xm1, F2xm1) // FPU
+ ASMJIT_INST_0x(fabs, Fabs) // FPU
+ ASMJIT_INST_2x(fadd, Fadd, St, St) // FPU
+ ASMJIT_INST_1x(fadd, Fadd, Mem) // FPU
+ ASMJIT_INST_1x(faddp, Faddp, St) // FPU
+ ASMJIT_INST_0x(faddp, Faddp) // FPU
+ ASMJIT_INST_1x(fbld, Fbld, Mem) // FPU
+ ASMJIT_INST_1x(fbstp, Fbstp, Mem) // FPU
+ ASMJIT_INST_0x(fchs, Fchs) // FPU
+ ASMJIT_INST_0x(fclex, Fclex) // FPU
+ ASMJIT_INST_1x(fcmovb, Fcmovb, St) // FPU
+ ASMJIT_INST_1x(fcmovbe, Fcmovbe, St) // FPU
+ ASMJIT_INST_1x(fcmove, Fcmove, St) // FPU
+ ASMJIT_INST_1x(fcmovnb, Fcmovnb, St) // FPU
+ ASMJIT_INST_1x(fcmovnbe, Fcmovnbe, St) // FPU
+ ASMJIT_INST_1x(fcmovne, Fcmovne, St) // FPU
+ ASMJIT_INST_1x(fcmovnu, Fcmovnu, St) // FPU
+ ASMJIT_INST_1x(fcmovu, Fcmovu, St) // FPU
+ ASMJIT_INST_1x(fcom, Fcom, St) // FPU
+ ASMJIT_INST_0x(fcom, Fcom) // FPU
+ ASMJIT_INST_1x(fcom, Fcom, Mem) // FPU
+ ASMJIT_INST_1x(fcomp, Fcomp, St) // FPU
+ ASMJIT_INST_0x(fcomp, Fcomp) // FPU
+ ASMJIT_INST_1x(fcomp, Fcomp, Mem) // FPU
+ ASMJIT_INST_0x(fcompp, Fcompp) // FPU
+ ASMJIT_INST_1x(fcomi, Fcomi, St) // FPU
+ ASMJIT_INST_1x(fcomip, Fcomip, St) // FPU
+ ASMJIT_INST_0x(fcos, Fcos) // FPU
+ ASMJIT_INST_0x(fdecstp, Fdecstp) // FPU
+ ASMJIT_INST_2x(fdiv, Fdiv, St, St) // FPU
+ ASMJIT_INST_1x(fdiv, Fdiv, Mem) // FPU
+ ASMJIT_INST_1x(fdivp, Fdivp, St) // FPU
+ ASMJIT_INST_0x(fdivp, Fdivp) // FPU
+ ASMJIT_INST_2x(fdivr, Fdivr, St, St) // FPU
+ ASMJIT_INST_1x(fdivr, Fdivr, Mem) // FPU
+ ASMJIT_INST_1x(fdivrp, Fdivrp, St) // FPU
+ ASMJIT_INST_0x(fdivrp, Fdivrp) // FPU
+ ASMJIT_INST_1x(ffree, Ffree, St) // FPU
+ ASMJIT_INST_1x(fiadd, Fiadd, Mem) // FPU
+ ASMJIT_INST_1x(ficom, Ficom, Mem) // FPU
+ ASMJIT_INST_1x(ficomp, Ficomp, Mem) // FPU
+ ASMJIT_INST_1x(fidiv, Fidiv, Mem) // FPU
+ ASMJIT_INST_1x(fidivr, Fidivr, Mem) // FPU
+ ASMJIT_INST_1x(fild, Fild, Mem) // FPU
+ ASMJIT_INST_1x(fimul, Fimul, Mem) // FPU
+ ASMJIT_INST_0x(fincstp, Fincstp) // FPU
+ ASMJIT_INST_0x(finit, Finit) // FPU
+ ASMJIT_INST_1x(fisub, Fisub, Mem) // FPU
+ ASMJIT_INST_1x(fisubr, Fisubr, Mem) // FPU
+ ASMJIT_INST_0x(fninit, Fninit) // FPU
+ ASMJIT_INST_1x(fist, Fist, Mem) // FPU
+ ASMJIT_INST_1x(fistp, Fistp, Mem) // FPU
+ ASMJIT_INST_1x(fisttp, Fisttp, Mem) // FPU+SSE3
+ ASMJIT_INST_1x(fld, Fld, Mem) // FPU
+ ASMJIT_INST_1x(fld, Fld, St) // FPU
+ ASMJIT_INST_0x(fld1, Fld1) // FPU
+ ASMJIT_INST_0x(fldl2t, Fldl2t) // FPU
+ ASMJIT_INST_0x(fldl2e, Fldl2e) // FPU
+ ASMJIT_INST_0x(fldpi, Fldpi) // FPU
+ ASMJIT_INST_0x(fldlg2, Fldlg2) // FPU
+ ASMJIT_INST_0x(fldln2, Fldln2) // FPU
+ ASMJIT_INST_0x(fldz, Fldz) // FPU
+ ASMJIT_INST_1x(fldcw, Fldcw, Mem) // FPU
+ ASMJIT_INST_1x(fldenv, Fldenv, Mem) // FPU
+ ASMJIT_INST_2x(fmul, Fmul, St, St) // FPU
+ ASMJIT_INST_1x(fmul, Fmul, Mem) // FPU
+ ASMJIT_INST_1x(fmulp, Fmulp, St) // FPU
+ ASMJIT_INST_0x(fmulp, Fmulp) // FPU
+ ASMJIT_INST_0x(fnclex, Fnclex) // FPU
+ ASMJIT_INST_0x(fnop, Fnop) // FPU
+ ASMJIT_INST_1x(fnsave, Fnsave, Mem) // FPU
+ ASMJIT_INST_1x(fnstenv, Fnstenv, Mem) // FPU
+ ASMJIT_INST_1x(fnstcw, Fnstcw, Mem) // FPU
+ ASMJIT_INST_0x(fpatan, Fpatan) // FPU
+ ASMJIT_INST_0x(fprem, Fprem) // FPU
+ ASMJIT_INST_0x(fprem1, Fprem1) // FPU
+ ASMJIT_INST_0x(fptan, Fptan) // FPU
+ ASMJIT_INST_0x(frndint, Frndint) // FPU
+ ASMJIT_INST_1x(frstor, Frstor, Mem) // FPU
+ ASMJIT_INST_1x(fsave, Fsave, Mem) // FPU
+ ASMJIT_INST_0x(fscale, Fscale) // FPU
+ ASMJIT_INST_0x(fsin, Fsin) // FPU
+ ASMJIT_INST_0x(fsincos, Fsincos) // FPU
+ ASMJIT_INST_0x(fsqrt, Fsqrt) // FPU
+ ASMJIT_INST_1x(fst, Fst, Mem) // FPU
+ ASMJIT_INST_1x(fst, Fst, St) // FPU
+ ASMJIT_INST_1x(fstp, Fstp, Mem) // FPU
+ ASMJIT_INST_1x(fstp, Fstp, St) // FPU
+ ASMJIT_INST_1x(fstcw, Fstcw, Mem) // FPU
+ ASMJIT_INST_1x(fstenv, Fstenv, Mem) // FPU
+ ASMJIT_INST_2x(fsub, Fsub, St, St) // FPU
+ ASMJIT_INST_1x(fsub, Fsub, Mem) // FPU
+ ASMJIT_INST_1x(fsubp, Fsubp, St) // FPU
+ ASMJIT_INST_0x(fsubp, Fsubp) // FPU
+ ASMJIT_INST_2x(fsubr, Fsubr, St, St) // FPU
+ ASMJIT_INST_1x(fsubr, Fsubr, Mem) // FPU
+ ASMJIT_INST_1x(fsubrp, Fsubrp, St) // FPU
+ ASMJIT_INST_0x(fsubrp, Fsubrp) // FPU
+ ASMJIT_INST_0x(ftst, Ftst) // FPU
+ ASMJIT_INST_1x(fucom, Fucom, St) // FPU
+ ASMJIT_INST_0x(fucom, Fucom) // FPU
+ ASMJIT_INST_1x(fucomi, Fucomi, St) // FPU
+ ASMJIT_INST_1x(fucomip, Fucomip, St) // FPU
+ ASMJIT_INST_1x(fucomp, Fucomp, St) // FPU
+ ASMJIT_INST_0x(fucomp, Fucomp) // FPU
+ ASMJIT_INST_0x(fucompp, Fucompp) // FPU
+ ASMJIT_INST_0x(fwait, Fwait) // FPU
+ ASMJIT_INST_0x(fxam, Fxam) // FPU
+ ASMJIT_INST_1x(fxch, Fxch, St) // FPU
+ ASMJIT_INST_0x(fxtract, Fxtract) // FPU
+ ASMJIT_INST_0x(fyl2x, Fyl2x) // FPU
+ ASMJIT_INST_0x(fyl2xp1, Fyl2xp1) // FPU
+ ASMJIT_INST_1x(fstsw, Fstsw, Gp) // FPU
+ ASMJIT_INST_1x(fstsw, Fstsw, Mem) // FPU
+ ASMJIT_INST_1x(fnstsw, Fnstsw, Gp) // FPU
+ ASMJIT_INST_1x(fnstsw, Fnstsw, Mem) // FPU
+
+ //! \}
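+
+ // A minimal x87 sketch; operands live on the FPU register stack (assumes an
+ // `x86::Assembler a`; the memory operand is illustrative):
+ //
+ //   a.fld(x86::dword_ptr(x86::rsp));  // Push a float from [rsp] onto st(0).
+ //   a.fsqrt();                        // st(0) = sqrt(st(0)).
+ //   a.fstp(x86::dword_ptr(x86::rsp)); // Pop st(0) back to [rsp].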
+
+ //! \name MMX & SSE+ Instructions
+ //! \{
+
+ ASMJIT_INST_2x(addpd, Addpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(addpd, Addpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(addps, Addps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(addps, Addps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(addsd, Addsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(addsd, Addsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(addss, Addss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(addss, Addss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(addsubpd, Addsubpd, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(addsubpd, Addsubpd, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(addsubps, Addsubps, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(addsubps, Addsubps, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(andnpd, Andnpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(andnpd, Andnpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(andnps, Andnps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(andnps, Andnps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(andpd, Andpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(andpd, Andpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(andps, Andps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(andps, Andps, Xmm, Mem) // SSE
+ ASMJIT_INST_3i(blendpd, Blendpd, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(blendpd, Blendpd, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(blendps, Blendps, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(blendps, Blendps, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3x(blendvpd, Blendvpd, Xmm, Xmm, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3x(blendvpd, Blendvpd, Xmm, Mem, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3x(blendvps, Blendvps, Xmm, Xmm, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3x(blendvps, Blendvps, Xmm, Mem, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3i(cmppd, Cmppd, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(cmppd, Cmppd, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(cmpps, Cmpps, Xmm, Xmm, Imm) // SSE
+ ASMJIT_INST_3i(cmpps, Cmpps, Xmm, Mem, Imm) // SSE
+ ASMJIT_INST_3i(cmpsd, Cmpsd, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(cmpsd, Cmpsd, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(cmpss, Cmpss, Xmm, Xmm, Imm) // SSE
+ ASMJIT_INST_3i(cmpss, Cmpss, Xmm, Mem, Imm) // SSE
+ ASMJIT_INST_2x(comisd, Comisd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(comisd, Comisd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(comiss, Comiss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(comiss, Comiss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(cvtdq2pd, Cvtdq2pd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtdq2pd, Cvtdq2pd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtdq2ps, Cvtdq2ps, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtdq2ps, Cvtdq2ps, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtpd2dq, Cvtpd2dq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtpd2dq, Cvtpd2dq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtpd2pi, Cvtpd2pi, Mm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtpd2pi, Cvtpd2pi, Mm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtpd2ps, Cvtpd2ps, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtpd2ps, Cvtpd2ps, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtpi2pd, Cvtpi2pd, Xmm, Mm) // SSE2
+ ASMJIT_INST_2x(cvtpi2pd, Cvtpi2pd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtpi2ps, Cvtpi2ps, Xmm, Mm) // SSE
+ ASMJIT_INST_2x(cvtpi2ps, Cvtpi2ps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(cvtps2dq, Cvtps2dq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtps2dq, Cvtps2dq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtps2pd, Cvtps2pd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtps2pd, Cvtps2pd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtps2pi, Cvtps2pi, Mm, Xmm) // SSE
+ ASMJIT_INST_2x(cvtps2pi, Cvtps2pi, Mm, Mem) // SSE
+ ASMJIT_INST_2x(cvtsd2si, Cvtsd2si, Gp, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtsd2si, Cvtsd2si, Gp, Mem) // SSE2
+ ASMJIT_INST_2x(cvtsd2ss, Cvtsd2ss, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtsd2ss, Cvtsd2ss, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtsi2sd, Cvtsi2sd, Xmm, Gp) // SSE2
+ ASMJIT_INST_2x(cvtsi2sd, Cvtsi2sd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtsi2ss, Cvtsi2ss, Xmm, Gp) // SSE
+ ASMJIT_INST_2x(cvtsi2ss, Cvtsi2ss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(cvtss2sd, Cvtss2sd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvtss2sd, Cvtss2sd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvtss2si, Cvtss2si, Gp, Xmm) // SSE
+ ASMJIT_INST_2x(cvtss2si, Cvtss2si, Gp, Mem) // SSE
+ ASMJIT_INST_2x(cvttpd2pi, Cvttpd2pi, Mm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvttpd2pi, Cvttpd2pi, Mm, Mem) // SSE2
+ ASMJIT_INST_2x(cvttpd2dq, Cvttpd2dq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvttpd2dq, Cvttpd2dq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvttps2dq, Cvttps2dq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(cvttps2dq, Cvttps2dq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(cvttps2pi, Cvttps2pi, Mm, Xmm) // SSE
+ ASMJIT_INST_2x(cvttps2pi, Cvttps2pi, Mm, Mem) // SSE
+ ASMJIT_INST_2x(cvttsd2si, Cvttsd2si, Gp, Xmm) // SSE2
+ ASMJIT_INST_2x(cvttsd2si, Cvttsd2si, Gp, Mem) // SSE2
+ ASMJIT_INST_2x(cvttss2si, Cvttss2si, Gp, Xmm) // SSE
+ ASMJIT_INST_2x(cvttss2si, Cvttss2si, Gp, Mem) // SSE
+ ASMJIT_INST_2x(divpd, Divpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(divpd, Divpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(divps, Divps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(divps, Divps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(divsd, Divsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(divsd, Divsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(divss, Divss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(divss, Divss, Xmm, Mem) // SSE
+ ASMJIT_INST_3i(dppd, Dppd, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(dppd, Dppd, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(dpps, Dpps, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(dpps, Dpps, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(extractps, Extractps, Gp, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(extractps, Extractps, Mem, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_2x(extrq, Extrq, Xmm, Xmm) // SSE4A
+ ASMJIT_INST_3ii(extrq, Extrq, Xmm, Imm, Imm) // SSE4A
+ ASMJIT_INST_3i(gf2p8affineinvqb, Gf2p8affineinvqb, Xmm, Xmm, Imm) // GFNI
+ ASMJIT_INST_3i(gf2p8affineinvqb, Gf2p8affineinvqb, Xmm, Mem, Imm) // GFNI
+ ASMJIT_INST_3i(gf2p8affineqb, Gf2p8affineqb, Xmm, Xmm, Imm) // GFNI
+ ASMJIT_INST_3i(gf2p8affineqb, Gf2p8affineqb, Xmm, Mem, Imm) // GFNI
+ ASMJIT_INST_2x(gf2p8mulb, Gf2p8mulb, Xmm, Xmm) // GFNI
+ ASMJIT_INST_2x(gf2p8mulb, Gf2p8mulb, Xmm, Mem) // GFNI
+ ASMJIT_INST_2x(haddpd, Haddpd, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(haddpd, Haddpd, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(haddps, Haddps, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(haddps, Haddps, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(hsubpd, Hsubpd, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(hsubpd, Hsubpd, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(hsubps, Hsubps, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(hsubps, Hsubps, Xmm, Mem) // SSE3
+ ASMJIT_INST_3i(insertps, Insertps, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(insertps, Insertps, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_2x(insertq, Insertq, Xmm, Xmm) // SSE4A
+ ASMJIT_INST_4ii(insertq, Insertq, Xmm, Xmm, Imm, Imm) // SSE4A
+ ASMJIT_INST_2x(lddqu, Lddqu, Xmm, Mem) // SSE3
+ ASMJIT_INST_3x(maskmovq, Maskmovq, Mm, Mm, DS_ZDI) // SSE [EXPLICIT]
+ ASMJIT_INST_3x(maskmovdqu, Maskmovdqu, Xmm, Xmm, DS_ZDI) // SSE2 [EXPLICIT]
+ ASMJIT_INST_2x(maxpd, Maxpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(maxpd, Maxpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(maxps, Maxps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(maxps, Maxps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(maxsd, Maxsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(maxsd, Maxsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(maxss, Maxss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(maxss, Maxss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(minpd, Minpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(minpd, Minpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(minps, Minps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(minps, Minps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(minsd, Minsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(minsd, Minsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(minss, Minss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(minss, Minss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movapd, Movapd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movapd, Movapd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movapd, Movapd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movaps, Movaps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(movaps, Movaps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movaps, Movaps, Mem, Xmm) // SSE
+ ASMJIT_INST_2x(movd, Movd, Mem, Mm) // MMX
+ ASMJIT_INST_2x(movd, Movd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movd, Movd, Gp, Mm) // MMX
+ ASMJIT_INST_2x(movd, Movd, Gp, Xmm) // SSE2
+ ASMJIT_INST_2x(movd, Movd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(movd, Movd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movd, Movd, Mm, Gp) // MMX
+ ASMJIT_INST_2x(movd, Movd, Xmm, Gp) // SSE2
+ ASMJIT_INST_2x(movddup, Movddup, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(movddup, Movddup, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(movdq2q, Movdq2q, Mm, Xmm) // SSE2
+ ASMJIT_INST_2x(movdqa, Movdqa, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movdqa, Movdqa, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movdqa, Movdqa, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movdqu, Movdqu, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movdqu, Movdqu, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movdqu, Movdqu, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movhlps, Movhlps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(movhpd, Movhpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movhpd, Movhpd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movhps, Movhps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movhps, Movhps, Mem, Xmm) // SSE
+ ASMJIT_INST_2x(movlhps, Movlhps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(movlpd, Movlpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movlpd, Movlpd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movlps, Movlps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movlps, Movlps, Mem, Xmm) // SSE
+ ASMJIT_INST_2x(movmskps, Movmskps, Gp, Xmm) // SSE
+ ASMJIT_INST_2x(movmskpd, Movmskpd, Gp, Xmm) // SSE2
+ ASMJIT_INST_2x(movntdq, Movntdq, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movntdqa, Movntdqa, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(movntpd, Movntpd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movntps, Movntps, Mem, Xmm) // SSE
+ ASMJIT_INST_2x(movntsd, Movntsd, Mem, Xmm) // SSE4A
+ ASMJIT_INST_2x(movntss, Movntss, Mem, Xmm) // SSE4A
+ ASMJIT_INST_2x(movntq, Movntq, Mem, Mm) // SSE
+ ASMJIT_INST_2x(movq, Movq, Mm, Mm) // MMX
+ ASMJIT_INST_2x(movq, Movq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movq, Movq, Mem, Mm) // MMX
+ ASMJIT_INST_2x(movq, Movq, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movq, Movq, Mm, Mem) // MMX
+ ASMJIT_INST_2x(movq, Movq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movq, Movq, Gp, Mm) // MMX+X64
+ ASMJIT_INST_2x(movq, Movq, Gp, Xmm) // SSE2+X64
+ ASMJIT_INST_2x(movq, Movq, Mm, Gp) // MMX+X64
+ ASMJIT_INST_2x(movq, Movq, Xmm, Gp) // SSE2+X64
+ ASMJIT_INST_2x(movq2dq, Movq2dq, Xmm, Mm) // SSE2
+ ASMJIT_INST_2x(movsd, Movsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movsd, Movsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movsd, Movsd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movshdup, Movshdup, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(movshdup, Movshdup, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(movsldup, Movsldup, Xmm, Xmm) // SSE3
+ ASMJIT_INST_2x(movsldup, Movsldup, Xmm, Mem) // SSE3
+ ASMJIT_INST_2x(movss, Movss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(movss, Movss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movss, Movss, Mem, Xmm) // SSE
+ ASMJIT_INST_2x(movupd, Movupd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(movupd, Movupd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(movupd, Movupd, Mem, Xmm) // SSE2
+ ASMJIT_INST_2x(movups, Movups, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(movups, Movups, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(movups, Movups, Mem, Xmm) // SSE
+ ASMJIT_INST_3i(mpsadbw, Mpsadbw, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(mpsadbw, Mpsadbw, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_2x(mulpd, Mulpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(mulpd, Mulpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(mulps, Mulps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(mulps, Mulps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(mulsd, Mulsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(mulsd, Mulsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(mulss, Mulss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(mulss, Mulss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(orpd, Orpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(orpd, Orpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(orps, Orps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(orps, Orps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(packssdw, Packssdw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(packssdw, Packssdw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(packssdw, Packssdw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(packssdw, Packssdw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(packsswb, Packsswb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(packsswb, Packsswb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(packsswb, Packsswb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(packsswb, Packsswb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(packusdw, Packusdw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(packusdw, Packusdw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(packuswb, Packuswb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(packuswb, Packuswb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(packuswb, Packuswb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(packuswb, Packuswb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pabsb, Pabsb, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pabsb, Pabsb, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pabsb, Pabsb, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pabsb, Pabsb, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(pabsd, Pabsd, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pabsd, Pabsd, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pabsd, Pabsd, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pabsd, Pabsd, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(pabsw, Pabsw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pabsw, Pabsw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pabsw, Pabsw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pabsw, Pabsw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(paddb, Paddb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddb, Paddb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddb, Paddb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddb, Paddb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddd, Paddd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddd, Paddd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddd, Paddd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddd, Paddd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddq, Paddq, Mm, Mm) // SSE2
+ ASMJIT_INST_2x(paddq, Paddq, Mm, Mem) // SSE2
+ ASMJIT_INST_2x(paddq, Paddq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddq, Paddq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddsb, Paddsb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddsb, Paddsb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddsb, Paddsb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddsb, Paddsb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddsw, Paddsw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddsw, Paddsw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddsw, Paddsw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddsw, Paddsw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddusb, Paddusb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddusb, Paddusb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddusb, Paddusb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddusb, Paddusb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddusw, Paddusw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddusw, Paddusw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddusw, Paddusw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddusw, Paddusw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(paddw, Paddw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(paddw, Paddw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(paddw, Paddw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(paddw, Paddw, Xmm, Mem) // SSE2
+ ASMJIT_INST_3i(palignr, Palignr, Mm, Mm, Imm) // SSSE3
+ ASMJIT_INST_3i(palignr, Palignr, Mm, Mem, Imm) // SSSE3
+ ASMJIT_INST_3i(palignr, Palignr, Xmm, Xmm, Imm) // SSSE3
+ ASMJIT_INST_3i(palignr, Palignr, Xmm, Mem, Imm) // SSSE3
+ ASMJIT_INST_2x(pand, Pand, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pand, Pand, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pand, Pand, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pand, Pand, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pandn, Pandn, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pandn, Pandn, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pandn, Pandn, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pandn, Pandn, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pavgb, Pavgb, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pavgb, Pavgb, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pavgb, Pavgb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pavgb, Pavgb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pavgw, Pavgw, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pavgw, Pavgw, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pavgw, Pavgw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pavgw, Pavgw, Xmm, Mem) // SSE2
+ ASMJIT_INST_3x(pblendvb, Pblendvb, Xmm, Xmm, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3x(pblendvb, Pblendvb, Xmm, Mem, XMM0) // SSE4_1 [EXPLICIT]
+ ASMJIT_INST_3i(pblendw, Pblendw, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pblendw, Pblendw, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(pclmulqdq, Pclmulqdq, Xmm, Xmm, Imm) // PCLMULQDQ
+ ASMJIT_INST_3i(pclmulqdq, Pclmulqdq, Xmm, Mem, Imm) // PCLMULQDQ
+ ASMJIT_INST_6x(pcmpestri, Pcmpestri, Xmm, Xmm, Imm, ECX, EAX, EDX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_6x(pcmpestri, Pcmpestri, Xmm, Mem, Imm, ECX, EAX, EDX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_6x(pcmpestrm, Pcmpestrm, Xmm, Xmm, Imm, XMM0, EAX, EDX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_6x(pcmpestrm, Pcmpestrm, Xmm, Mem, Imm, XMM0, EAX, EDX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_2x(pcmpeqb, Pcmpeqb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpeqb, Pcmpeqb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpeqb, Pcmpeqb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpeqb, Pcmpeqb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pcmpeqd, Pcmpeqd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpeqd, Pcmpeqd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpeqd, Pcmpeqd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpeqd, Pcmpeqd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pcmpeqq, Pcmpeqq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pcmpeqq, Pcmpeqq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pcmpeqw, Pcmpeqw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpeqw, Pcmpeqw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpeqw, Pcmpeqw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpeqw, Pcmpeqw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pcmpgtb, Pcmpgtb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpgtb, Pcmpgtb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpgtb, Pcmpgtb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpgtb, Pcmpgtb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pcmpgtd, Pcmpgtd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpgtd, Pcmpgtd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpgtd, Pcmpgtd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpgtd, Pcmpgtd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pcmpgtq, Pcmpgtq, Xmm, Xmm) // SSE4_2
+ ASMJIT_INST_2x(pcmpgtq, Pcmpgtq, Xmm, Mem) // SSE4_2
+ ASMJIT_INST_2x(pcmpgtw, Pcmpgtw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pcmpgtw, Pcmpgtw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pcmpgtw, Pcmpgtw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pcmpgtw, Pcmpgtw, Xmm, Mem) // SSE2
+ ASMJIT_INST_4x(pcmpistri, Pcmpistri, Xmm, Xmm, Imm, ECX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_4x(pcmpistri, Pcmpistri, Xmm, Mem, Imm, ECX) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_4x(pcmpistrm, Pcmpistrm, Xmm, Xmm, Imm, XMM0) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_4x(pcmpistrm, Pcmpistrm, Xmm, Mem, Imm, XMM0) // SSE4_2 [EXPLICIT]
+ ASMJIT_INST_3i(pextrb, Pextrb, Gp, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrb, Pextrb, Mem, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrd, Pextrd, Gp, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrd, Pextrd, Mem, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrq, Pextrq, Gp, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrq, Pextrq, Mem, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(pextrw, Pextrw, Gp, Mm, Imm) // SSE
+ ASMJIT_INST_3i(pextrw, Pextrw, Gp, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(pextrw, Pextrw, Mem, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_2x(phaddd, Phaddd, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phaddd, Phaddd, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phaddd, Phaddd, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phaddd, Phaddd, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(phaddsw, Phaddsw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phaddsw, Phaddsw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phaddsw, Phaddsw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phaddsw, Phaddsw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(phaddw, Phaddw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phaddw, Phaddw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phaddw, Phaddw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phaddw, Phaddw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(phminposuw, Phminposuw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(phminposuw, Phminposuw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(phsubd, Phsubd, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phsubd, Phsubd, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phsubd, Phsubd, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phsubd, Phsubd, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(phsubsw, Phsubsw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phsubsw, Phsubsw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phsubsw, Phsubsw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phsubsw, Phsubsw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(phsubw, Phsubw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(phsubw, Phsubw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(phsubw, Phsubw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(phsubw, Phsubw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_3i(pinsrb, Pinsrb, Xmm, Gp, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrb, Pinsrb, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrd, Pinsrd, Xmm, Gp, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrd, Pinsrd, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrq, Pinsrq, Xmm, Gp, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrq, Pinsrq, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(pinsrw, Pinsrw, Mm, Gp, Imm) // SSE
+ ASMJIT_INST_3i(pinsrw, Pinsrw, Mm, Mem, Imm) // SSE
+ ASMJIT_INST_3i(pinsrw, Pinsrw, Xmm, Gp, Imm) // SSE2
+ ASMJIT_INST_3i(pinsrw, Pinsrw, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_2x(pmaddubsw, Pmaddubsw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pmaddubsw, Pmaddubsw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pmaddubsw, Pmaddubsw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pmaddubsw, Pmaddubsw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(pmaddwd, Pmaddwd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pmaddwd, Pmaddwd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pmaddwd, Pmaddwd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmaddwd, Pmaddwd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmaxsb, Pmaxsb, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmaxsb, Pmaxsb, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmaxsd, Pmaxsd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmaxsd, Pmaxsd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmaxsw, Pmaxsw, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pmaxsw, Pmaxsw, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pmaxsw, Pmaxsw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmaxsw, Pmaxsw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmaxub, Pmaxub, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pmaxub, Pmaxub, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pmaxub, Pmaxub, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmaxub, Pmaxub, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmaxud, Pmaxud, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmaxud, Pmaxud, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmaxuw, Pmaxuw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmaxuw, Pmaxuw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pminsb, Pminsb, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pminsb, Pminsb, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pminsd, Pminsd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pminsd, Pminsd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pminsw, Pminsw, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pminsw, Pminsw, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pminsw, Pminsw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pminsw, Pminsw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pminub, Pminub, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pminub, Pminub, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pminub, Pminub, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pminub, Pminub, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pminud, Pminud, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pminud, Pminud, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pminuw, Pminuw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pminuw, Pminuw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovmskb, Pmovmskb, Gp, Mm) // SSE
+ ASMJIT_INST_2x(pmovmskb, Pmovmskb, Gp, Xmm) // SSE2
+ ASMJIT_INST_2x(pmovsxbd, Pmovsxbd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxbd, Pmovsxbd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovsxbq, Pmovsxbq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxbq, Pmovsxbq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovsxbw, Pmovsxbw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxbw, Pmovsxbw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovsxdq, Pmovsxdq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxdq, Pmovsxdq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovsxwd, Pmovsxwd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxwd, Pmovsxwd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovsxwq, Pmovsxwq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovsxwq, Pmovsxwq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbd, Pmovzxbd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbd, Pmovzxbd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbq, Pmovzxbq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbq, Pmovzxbq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbw, Pmovzxbw, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxbw, Pmovzxbw, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxdq, Pmovzxdq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxdq, Pmovzxdq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxwd, Pmovzxwd, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxwd, Pmovzxwd, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmovzxwq, Pmovzxwq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmovzxwq, Pmovzxwq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmuldq, Pmuldq, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmuldq, Pmuldq, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmulhrsw, Pmulhrsw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pmulhrsw, Pmulhrsw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pmulhrsw, Pmulhrsw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pmulhrsw, Pmulhrsw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(pmulhw, Pmulhw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pmulhw, Pmulhw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pmulhw, Pmulhw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmulhw, Pmulhw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmulhuw, Pmulhuw, Mm, Mm) // SSE
+ ASMJIT_INST_2x(pmulhuw, Pmulhuw, Mm, Mem) // SSE
+ ASMJIT_INST_2x(pmulhuw, Pmulhuw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmulhuw, Pmulhuw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmulld, Pmulld, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(pmulld, Pmulld, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(pmullw, Pmullw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pmullw, Pmullw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pmullw, Pmullw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmullw, Pmullw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pmuludq, Pmuludq, Mm, Mm) // SSE2
+ ASMJIT_INST_2x(pmuludq, Pmuludq, Mm, Mem) // SSE2
+ ASMJIT_INST_2x(pmuludq, Pmuludq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pmuludq, Pmuludq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(por, Por, Mm, Mm) // MMX
+ ASMJIT_INST_2x(por, Por, Mm, Mem) // MMX
+ ASMJIT_INST_2x(por, Por, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(por, Por, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psadbw, Psadbw, Mm, Mm) // SSE
+ ASMJIT_INST_2x(psadbw, Psadbw, Mm, Mem) // SSE
+ ASMJIT_INST_2x(psadbw, Psadbw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psadbw, Psadbw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pslld, Pslld, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pslld, Pslld, Mm, Mem) // MMX
+ ASMJIT_INST_2i(pslld, Pslld, Mm, Imm) // MMX
+ ASMJIT_INST_2x(pslld, Pslld, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pslld, Pslld, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(pslld, Pslld, Xmm, Imm) // SSE2
+ ASMJIT_INST_2i(pslldq, Pslldq, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psllq, Psllq, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psllq, Psllq, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psllq, Psllq, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psllq, Psllq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psllq, Psllq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psllq, Psllq, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psllw, Psllw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psllw, Psllw, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psllw, Psllw, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psllw, Psllw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psllw, Psllw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psllw, Psllw, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psrad, Psrad, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psrad, Psrad, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psrad, Psrad, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psrad, Psrad, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psrad, Psrad, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psrad, Psrad, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psraw, Psraw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psraw, Psraw, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psraw, Psraw, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psraw, Psraw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psraw, Psraw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psraw, Psraw, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(pshufb, Pshufb, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(pshufb, Pshufb, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(pshufb, Pshufb, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(pshufb, Pshufb, Xmm, Mem) // SSSE3
+ ASMJIT_INST_3i(pshufd, Pshufd, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(pshufd, Pshufd, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(pshufhw, Pshufhw, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(pshufhw, Pshufhw, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(pshuflw, Pshuflw, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(pshuflw, Pshuflw, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(pshufw, Pshufw, Mm, Mm, Imm) // SSE
+ ASMJIT_INST_3i(pshufw, Pshufw, Mm, Mem, Imm) // SSE
+ ASMJIT_INST_2x(psignb, Psignb, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(psignb, Psignb, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(psignb, Psignb, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(psignb, Psignb, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(psignd, Psignd, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(psignd, Psignd, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(psignd, Psignd, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(psignd, Psignd, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(psignw, Psignw, Mm, Mm) // SSSE3
+ ASMJIT_INST_2x(psignw, Psignw, Mm, Mem) // SSSE3
+ ASMJIT_INST_2x(psignw, Psignw, Xmm, Xmm) // SSSE3
+ ASMJIT_INST_2x(psignw, Psignw, Xmm, Mem) // SSSE3
+ ASMJIT_INST_2x(psrld, Psrld, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psrld, Psrld, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psrld, Psrld, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psrld, Psrld, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psrld, Psrld, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psrld, Psrld, Xmm, Imm) // SSE2
+ ASMJIT_INST_2i(psrldq, Psrldq, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psrlq, Psrlq, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psrlq, Psrlq, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psrlq, Psrlq, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psrlq, Psrlq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psrlq, Psrlq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psrlq, Psrlq, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psrlw, Psrlw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psrlw, Psrlw, Mm, Mem) // MMX
+ ASMJIT_INST_2i(psrlw, Psrlw, Mm, Imm) // MMX
+ ASMJIT_INST_2x(psrlw, Psrlw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psrlw, Psrlw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2i(psrlw, Psrlw, Xmm, Imm) // SSE2
+ ASMJIT_INST_2x(psubb, Psubb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubb, Psubb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubb, Psubb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubb, Psubb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubd, Psubd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubd, Psubd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubd, Psubd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubd, Psubd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubq, Psubq, Mm, Mm) // SSE2
+ ASMJIT_INST_2x(psubq, Psubq, Mm, Mem) // SSE2
+ ASMJIT_INST_2x(psubq, Psubq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubq, Psubq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubsb, Psubsb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubsb, Psubsb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubsb, Psubsb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubsb, Psubsb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubsw, Psubsw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubsw, Psubsw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubsw, Psubsw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubsw, Psubsw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubusb, Psubusb, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubusb, Psubusb, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubusb, Psubusb, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubusb, Psubusb, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubusw, Psubusw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubusw, Psubusw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubusw, Psubusw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubusw, Psubusw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(psubw, Psubw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(psubw, Psubw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(psubw, Psubw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(psubw, Psubw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(ptest, Ptest, Xmm, Xmm) // SSE4_1
+ ASMJIT_INST_2x(ptest, Ptest, Xmm, Mem) // SSE4_1
+ ASMJIT_INST_2x(punpckhbw, Punpckhbw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpckhbw, Punpckhbw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpckhbw, Punpckhbw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpckhbw, Punpckhbw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpckhdq, Punpckhdq, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpckhdq, Punpckhdq, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpckhdq, Punpckhdq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpckhdq, Punpckhdq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpckhqdq, Punpckhqdq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpckhqdq, Punpckhqdq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpckhwd, Punpckhwd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpckhwd, Punpckhwd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpckhwd, Punpckhwd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpckhwd, Punpckhwd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpcklbw, Punpcklbw, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpcklbw, Punpcklbw, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpcklbw, Punpcklbw, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpcklbw, Punpcklbw, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpckldq, Punpckldq, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpckldq, Punpckldq, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpckldq, Punpckldq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpckldq, Punpckldq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpcklqdq, Punpcklqdq, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpcklqdq, Punpcklqdq, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(punpcklwd, Punpcklwd, Mm, Mm) // MMX
+ ASMJIT_INST_2x(punpcklwd, Punpcklwd, Mm, Mem) // MMX
+ ASMJIT_INST_2x(punpcklwd, Punpcklwd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(punpcklwd, Punpcklwd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(pxor, Pxor, Mm, Mm) // MMX
+ ASMJIT_INST_2x(pxor, Pxor, Mm, Mem) // MMX
+ ASMJIT_INST_2x(pxor, Pxor, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(pxor, Pxor, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(rcpps, Rcpps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(rcpps, Rcpps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(rcpss, Rcpss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(rcpss, Rcpss, Xmm, Mem) // SSE
+ ASMJIT_INST_3i(roundpd, Roundpd, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundpd, Roundpd, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundps, Roundps, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundps, Roundps, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundsd, Roundsd, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundsd, Roundsd, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundss, Roundss, Xmm, Xmm, Imm) // SSE4_1
+ ASMJIT_INST_3i(roundss, Roundss, Xmm, Mem, Imm) // SSE4_1
+ ASMJIT_INST_2x(rsqrtps, Rsqrtps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(rsqrtps, Rsqrtps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(rsqrtss, Rsqrtss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(rsqrtss, Rsqrtss, Xmm, Mem) // SSE
+ ASMJIT_INST_3i(shufpd, Shufpd, Xmm, Xmm, Imm) // SSE2
+ ASMJIT_INST_3i(shufpd, Shufpd, Xmm, Mem, Imm) // SSE2
+ ASMJIT_INST_3i(shufps, Shufps, Xmm, Xmm, Imm) // SSE
+ ASMJIT_INST_3i(shufps, Shufps, Xmm, Mem, Imm) // SSE
+ ASMJIT_INST_2x(sqrtpd, Sqrtpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(sqrtpd, Sqrtpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(sqrtps, Sqrtps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(sqrtps, Sqrtps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(sqrtsd, Sqrtsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(sqrtsd, Sqrtsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(sqrtss, Sqrtss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(sqrtss, Sqrtss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(subpd, Subpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(subpd, Subpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(subps, Subps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(subps, Subps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(subsd, Subsd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(subsd, Subsd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(subss, Subss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(subss, Subss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(ucomisd, Ucomisd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(ucomisd, Ucomisd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(ucomiss, Ucomiss, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(ucomiss, Ucomiss, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(unpckhpd, Unpckhpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(unpckhpd, Unpckhpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(unpckhps, Unpckhps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(unpckhps, Unpckhps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(unpcklpd, Unpcklpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(unpcklpd, Unpcklpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(unpcklps, Unpcklps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(unpcklps, Unpcklps, Xmm, Mem) // SSE
+ ASMJIT_INST_2x(xorpd, Xorpd, Xmm, Xmm) // SSE2
+ ASMJIT_INST_2x(xorpd, Xorpd, Xmm, Mem) // SSE2
+ ASMJIT_INST_2x(xorps, Xorps, Xmm, Xmm) // SSE
+ ASMJIT_INST_2x(xorps, Xorps, Xmm, Mem) // SSE
+
+ //! \}
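+
+ //! A minimal usage sketch for the packed-integer forms above (assembler
+ //! setup omitted; `a` is assumed to be an initialized `x86::Assembler`).
+ //! Each `ASMJIT_INST_*` macro expands to an inline member function named
+ //! after the mnemonic, so the declared overloads are emitted as plain calls:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitSseSketch(x86::Assembler& a) {
+ //!   a.pxor(x86::xmm0, x86::xmm0);         // Zero xmm0 (SSE2).
+ //!   a.psadbw(x86::xmm0, x86::xmm1);       // Sum of absolute differences (SSE2).
+ //!   a.pshufd(x86::xmm2, x86::xmm0, 0x4E); // Swap the 64-bit halves (SSE2).
+ //! }
+ //! \endcode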
+
+ //! \name 3DNOW and GEODE Instructions (Deprecated)
+ //! \{
+
+ ASMJIT_INST_2x(pavgusb, Pavgusb, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pavgusb, Pavgusb, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pf2id, Pf2id, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pf2id, Pf2id, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pf2iw, Pf2iw, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pf2iw, Pf2iw, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfacc, Pfacc, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfacc, Pfacc, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfadd, Pfadd, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfadd, Pfadd, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfcmpeq, Pfcmpeq, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfcmpeq, Pfcmpeq, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfcmpge, Pfcmpge, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfcmpge, Pfcmpge, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfcmpgt, Pfcmpgt, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfcmpgt, Pfcmpgt, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfmax, Pfmax, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfmax, Pfmax, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfmin, Pfmin, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfmin, Pfmin, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfmul, Pfmul, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfmul, Pfmul, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfnacc, Pfnacc, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfnacc, Pfnacc, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfpnacc, Pfpnacc, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfpnacc, Pfpnacc, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrcp, Pfrcp, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfrcp, Pfrcp, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrcpit1, Pfrcpit1, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfrcpit1, Pfrcpit1, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrcpit2, Pfrcpit2, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfrcpit2, Pfrcpit2, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrcpv, Pfrcpv, Mm, Mm) // GEODE
+ ASMJIT_INST_2x(pfrcpv, Pfrcpv, Mm, Mem) // GEODE
+ ASMJIT_INST_2x(pfrsqit1, Pfrsqit1, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfrsqit1, Pfrsqit1, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrsqrt, Pfrsqrt, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfrsqrt, Pfrsqrt, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfrsqrtv, Pfrsqrtv, Mm, Mm) // GEODE
+ ASMJIT_INST_2x(pfrsqrtv, Pfrsqrtv, Mm, Mem) // GEODE
+ ASMJIT_INST_2x(pfsub, Pfsub, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfsub, Pfsub, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pfsubr, Pfsubr, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pfsubr, Pfsubr, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pi2fd, Pi2fd, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pi2fd, Pi2fd, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pi2fw, Pi2fw, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pi2fw, Pi2fw, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pmulhrw, Pmulhrw, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pmulhrw, Pmulhrw, Mm, Mem) // 3DNOW
+ ASMJIT_INST_2x(pswapd, Pswapd, Mm, Mm) // 3DNOW
+ ASMJIT_INST_2x(pswapd, Pswapd, Mm, Mem) // 3DNOW
+ ASMJIT_INST_0x(femms, Femms) // 3DNOW
+
+ //! \}
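+
+ //! These forms are deprecated and absent from modern CPUs; if 3DNOW code is
+ //! emitted at all, `femms` must terminate the MMX/3DNOW block before any x87
+ //! code runs. A cautionary sketch, not a recommendation:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emit3dNowSketch(x86::Assembler& a) {
+ //!   a.pfadd(x86::mm0, x86::mm1); // Packed single-precision add (3DNOW).
+ //!   a.pfmul(x86::mm0, x86::mm2); // Packed single-precision multiply (3DNOW).
+ //!   a.femms();                   // Fast clear of the MMX state (3DNOW).
+ //! }
+ //! \endcode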
+
+ //! \name AESNI Instructions
+ //! \{
+
+ ASMJIT_INST_2x(aesdec, Aesdec, Xmm, Xmm) // AESNI
+ ASMJIT_INST_2x(aesdec, Aesdec, Xmm, Mem) // AESNI
+ ASMJIT_INST_2x(aesdeclast, Aesdeclast, Xmm, Xmm) // AESNI
+ ASMJIT_INST_2x(aesdeclast, Aesdeclast, Xmm, Mem) // AESNI
+ ASMJIT_INST_2x(aesenc, Aesenc, Xmm, Xmm) // AESNI
+ ASMJIT_INST_2x(aesenc, Aesenc, Xmm, Mem) // AESNI
+ ASMJIT_INST_2x(aesenclast, Aesenclast, Xmm, Xmm) // AESNI
+ ASMJIT_INST_2x(aesenclast, Aesenclast, Xmm, Mem) // AESNI
+ ASMJIT_INST_2x(aesimc, Aesimc, Xmm, Xmm) // AESNI
+ ASMJIT_INST_2x(aesimc, Aesimc, Xmm, Mem) // AESNI
+ ASMJIT_INST_3i(aeskeygenassist, Aeskeygenassist, Xmm, Xmm, Imm) // AESNI
+ ASMJIT_INST_3i(aeskeygenassist, Aeskeygenassist, Xmm, Mem, Imm) // AESNI
+
+ //! \}
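+
+ //! A minimal AES-128 round sketch over the AESNI forms above; key expansion
+ //! is assumed to have been emitted elsewhere, and `state`/`roundKeys` are
+ //! hypothetical names supplied by the caller:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitAes128Rounds(x86::Assembler& a, const x86::Xmm& state,
+ //!                       const x86::Xmm roundKeys[11]) {
+ //!   a.pxor(state, roundKeys[0]);        // Initial AddRoundKey.
+ //!   for (int i = 1; i <= 9; i++)
+ //!     a.aesenc(state, roundKeys[i]);    // Rounds 1..9.
+ //!   a.aesenclast(state, roundKeys[10]); // Final round (no MixColumns).
+ //! }
+ //! \endcode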
+
+ //! \name SHA Instructions
+ //! \{
+
+ ASMJIT_INST_2x(sha1msg1, Sha1msg1, Xmm, Xmm) // SHA
+ ASMJIT_INST_2x(sha1msg1, Sha1msg1, Xmm, Mem) // SHA
+ ASMJIT_INST_2x(sha1msg2, Sha1msg2, Xmm, Xmm) // SHA
+ ASMJIT_INST_2x(sha1msg2, Sha1msg2, Xmm, Mem) // SHA
+ ASMJIT_INST_2x(sha1nexte, Sha1nexte, Xmm, Xmm) // SHA
+ ASMJIT_INST_2x(sha1nexte, Sha1nexte, Xmm, Mem) // SHA
+ ASMJIT_INST_3i(sha1rnds4, Sha1rnds4, Xmm, Xmm, Imm) // SHA
+ ASMJIT_INST_3i(sha1rnds4, Sha1rnds4, Xmm, Mem, Imm) // SHA
+ ASMJIT_INST_2x(sha256msg1, Sha256msg1, Xmm, Xmm) // SHA
+ ASMJIT_INST_2x(sha256msg1, Sha256msg1, Xmm, Mem) // SHA
+ ASMJIT_INST_2x(sha256msg2, Sha256msg2, Xmm, Xmm) // SHA
+ ASMJIT_INST_2x(sha256msg2, Sha256msg2, Xmm, Mem) // SHA
+ ASMJIT_INST_3x(sha256rnds2, Sha256rnds2, Xmm, Xmm, XMM0) // SHA [EXPLICIT]
+ ASMJIT_INST_3x(sha256rnds2, Sha256rnds2, Xmm, Mem, XMM0) // SHA [EXPLICIT]
+
+ //! \}
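+
+ //! Note the [EXPLICIT] marker above: the hardware encoding of `sha256rnds2`
+ //! uses XMM0 implicitly, but asmjit requires it to be passed explicitly as
+ //! the third operand. A minimal sketch:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitShaSketch(x86::Assembler& a) {
+ //!   a.sha256msg1(x86::xmm3, x86::xmm4);             // Message-schedule helper.
+ //!   a.sha256rnds2(x86::xmm1, x86::xmm2, x86::xmm0); // Third operand must be xmm0.
+ //! }
+ //! \endcode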
+
+ //! \name AVX, FMA, and AVX512 Instructions
+ //! \{
+
+ ASMJIT_INST_3x(kaddb, Kaddb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kaddd, Kaddd, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kaddq, Kaddq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kaddw, Kaddw, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kandb, Kandb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kandd, Kandd, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kandnb, Kandnb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kandnd, Kandnd, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kandnq, Kandnq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kandnw, Kandnw, KReg, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3x(kandq, Kandq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kandw, Kandw, KReg, KReg, KReg) // AVX512_F
+ ASMJIT_INST_2x(kmovb, Kmovb, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(kmovb, Kmovb, KReg, Mem) // AVX512_DQ
+ ASMJIT_INST_2x(kmovb, Kmovb, KReg, Gp) // AVX512_DQ
+ ASMJIT_INST_2x(kmovb, Kmovb, Mem, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(kmovb, Kmovb, Gp, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(kmovd, Kmovd, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovd, Kmovd, KReg, Mem) // AVX512_BW
+ ASMJIT_INST_2x(kmovd, Kmovd, KReg, Gp) // AVX512_BW
+ ASMJIT_INST_2x(kmovd, Kmovd, Mem, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovd, Kmovd, Gp, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovq, Kmovq, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovq, Kmovq, KReg, Mem) // AVX512_BW
+ ASMJIT_INST_2x(kmovq, Kmovq, KReg, Gp) // AVX512_BW
+ ASMJIT_INST_2x(kmovq, Kmovq, Mem, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovq, Kmovq, Gp, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kmovw, Kmovw, KReg, KReg) // AVX512_F
+ ASMJIT_INST_2x(kmovw, Kmovw, KReg, Mem) // AVX512_F
+ ASMJIT_INST_2x(kmovw, Kmovw, KReg, Gp) // AVX512_F
+ ASMJIT_INST_2x(kmovw, Kmovw, Mem, KReg) // AVX512_F
+ ASMJIT_INST_2x(kmovw, Kmovw, Gp, KReg) // AVX512_F
+ ASMJIT_INST_2x(knotb, Knotb, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(knotd, Knotd, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(knotq, Knotq, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(knotw, Knotw, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3x(korb, Korb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kord, Kord, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(korq, Korq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kortestb, Kortestb, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(kortestd, Kortestd, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kortestq, Kortestq, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(kortestw, Kortestw, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3x(korw, Korw, KReg, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3i(kshiftlb, Kshiftlb, KReg, KReg, Imm) // AVX512_DQ
+ ASMJIT_INST_3i(kshiftld, Kshiftld, KReg, KReg, Imm) // AVX512_BW
+ ASMJIT_INST_3i(kshiftlq, Kshiftlq, KReg, KReg, Imm) // AVX512_BW
+ ASMJIT_INST_3i(kshiftlw, Kshiftlw, KReg, KReg, Imm) // AVX512_F
+ ASMJIT_INST_3i(kshiftrb, Kshiftrb, KReg, KReg, Imm) // AVX512_DQ
+ ASMJIT_INST_3i(kshiftrd, Kshiftrd, KReg, KReg, Imm) // AVX512_BW
+ ASMJIT_INST_3i(kshiftrq, Kshiftrq, KReg, KReg, Imm) // AVX512_BW
+ ASMJIT_INST_3i(kshiftrw, Kshiftrw, KReg, KReg, Imm) // AVX512_F
+ ASMJIT_INST_2x(ktestb, Ktestb, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(ktestd, Ktestd, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(ktestq, Ktestq, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_2x(ktestw, Ktestw, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kunpckbw, Kunpckbw, KReg, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3x(kunpckdq, Kunpckdq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kunpckwd, Kunpckwd, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kxnorb, Kxnorb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kxnord, Kxnord, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kxnorq, Kxnorq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kxnorw, Kxnorw, KReg, KReg, KReg) // AVX512_F
+ ASMJIT_INST_3x(kxorb, Kxorb, KReg, KReg, KReg) // AVX512_DQ
+ ASMJIT_INST_3x(kxord, Kxord, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kxorq, Kxorq, KReg, KReg, KReg) // AVX512_BW
+ ASMJIT_INST_3x(kxorw, Kxorw, KReg, KReg, KReg) // AVX512_F
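+
+ //! A minimal sketch of the mask-register (KReg) forms above. `k0` encodes
+ //! "no masking" when used as a write-mask, so k1..k7 carry actual masks:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitMaskSketch(x86::Assembler& a) {
+ //!   a.kmovw(x86::k1, x86::eax);         // Load a 16-bit mask from EAX (AVX512_F).
+ //!   a.knotw(x86::k2, x86::k1);          // k2 = ~k1.
+ //!   a.kandw(x86::k3, x86::k1, x86::k2); // k3 = k1 & k2 (all zero here).
+ //!   a.kortestw(x86::k3, x86::k3);       // Sets ZF/CF for a following branch.
+ //! }
+ //! \endcode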
+ ASMJIT_INST_6x(v4fmaddps, V4fmaddps, Zmm, Zmm, Zmm, Zmm, Zmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_6x(v4fmaddss, V4fmaddss, Xmm, Xmm, Xmm, Xmm, Xmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_6x(v4fnmaddps, V4fnmaddps, Zmm, Zmm, Zmm, Zmm, Zmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_6x(v4fnmaddss, V4fnmaddss, Xmm, Xmm, Xmm, Xmm, Xmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vaddpd, Vaddpd, Zmm, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vaddps, Vaddps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vaddps, Vaddps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vaddps, Vaddps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vaddps, Vaddps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vaddps, Vaddps, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vaddps, Vaddps, Zmm, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vaddsd, Vaddsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vaddsd, Vaddsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vaddss, Vaddss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vaddss, Vaddss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vaddsubpd, Vaddsubpd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaddsubpd, Vaddsubpd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaddsubpd, Vaddsubpd, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vaddsubpd, Vaddsubpd, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vaddsubps, Vaddsubps, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaddsubps, Vaddsubps, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaddsubps, Vaddsubps, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vaddsubps, Vaddsubps, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Ymm, Ymm, Ymm) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Ymm, Ymm, Mem) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Zmm, Zmm, Zmm) // VAES
+ ASMJIT_INST_3x(vaesdec, Vaesdec, Zmm, Zmm, Mem) // VAES
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Ymm, Ymm, Ymm) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Ymm, Ymm, Mem) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Zmm, Zmm, Zmm) // VAES
+ ASMJIT_INST_3x(vaesdeclast, Vaesdeclast, Zmm, Zmm, Mem) // VAES
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Ymm, Ymm, Ymm) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Ymm, Ymm, Mem) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Zmm, Zmm, Zmm) // VAES
+ ASMJIT_INST_3x(vaesenc, Vaesenc, Zmm, Zmm, Mem) // VAES
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Ymm, Ymm, Ymm) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Ymm, Ymm, Mem) // VAES AVX512_VL
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Zmm, Zmm, Zmm) // VAES
+ ASMJIT_INST_3x(vaesenclast, Vaesenclast, Zmm, Zmm, Mem) // VAES
+ ASMJIT_INST_2x(vaesimc, Vaesimc, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vaesimc, Vaesimc, Xmm, Mem) // AVX
+ ASMJIT_INST_3i(vaeskeygenassist, Vaeskeygenassist, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_3i(vaeskeygenassist, Vaeskeygenassist, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(valignd, Valignd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(valignd, Valignd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(valignd, Valignd, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(valignd, Valignd, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(valignd, Valignd, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(valignd, Valignd, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(valignq, Valignq, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(valignq, Valignq, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(valignq, Valignq, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(valignq, Valignq, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(valignq, Valignq, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(valignq, Valignq, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vandnpd, Vandnpd, Zmm, Zmm, Mem) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vandnps, Vandnps, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandnps, Vandnps, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandnps, Vandnps, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandnps, Vandnps, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandnps, Vandnps, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vandnps, Vandnps, Zmm, Zmm, Mem) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vandpd, Vandpd, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandpd, Vandpd, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandpd, Vandpd, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandpd, Vandpd, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vandpd, Vandpd, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vandpd, Vandpd, Zmm, Zmm, Mem) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vandps, Vandps, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandps, Vandps, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandps, Vandps, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandps, Vandps, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vandps, Vandps, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vandps, Vandps, Zmm, Zmm, Mem) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vblendmb, Vblendmb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vblendmd, Vblendmd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vblendmpd, Vblendmpd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vblendmps, Vblendmps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vblendmq, Vblendmq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vblendmw, Vblendmw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vblendpd, Vblendpd, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vblendpd, Vblendpd, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vblendpd, Vblendpd, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vblendpd, Vblendpd, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vblendps, Vblendps, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vblendps, Vblendps, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vblendps, Vblendps, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vblendps, Vblendps, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4x(vblendvpd, Vblendvpd, Xmm, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_4x(vblendvpd, Vblendvpd, Xmm, Xmm, Mem, Xmm) // AVX
+ ASMJIT_INST_4x(vblendvpd, Vblendvpd, Ymm, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_4x(vblendvpd, Vblendvpd, Ymm, Ymm, Mem, Ymm) // AVX
+ ASMJIT_INST_4x(vblendvps, Vblendvps, Xmm, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_4x(vblendvps, Vblendvps, Xmm, Xmm, Mem, Xmm) // AVX
+ ASMJIT_INST_4x(vblendvps, Vblendvps, Ymm, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_4x(vblendvps, Vblendvps, Ymm, Ymm, Mem, Ymm) // AVX
+ ASMJIT_INST_2x(vbroadcastf128, Vbroadcastf128, Ymm, Mem) // AVX
+ ASMJIT_INST_2x(vbroadcastf32x2, Vbroadcastf32x2, Ymm, Xmm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcastf32x2, Vbroadcastf32x2, Ymm, Mem) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcastf32x2, Vbroadcastf32x2, Zmm, Xmm) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcastf32x2, Vbroadcastf32x2, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcastf32x4, Vbroadcastf32x4, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastf32x4, Vbroadcastf32x4, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcastf32x8, Vbroadcastf32x8, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcastf64x2, Vbroadcastf64x2, Ymm, Mem) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcastf64x2, Vbroadcastf64x2, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcastf64x4, Vbroadcastf64x4, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcasti128, Vbroadcasti128, Ymm, Mem) // AVX2
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Xmm, Xmm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Xmm, Mem) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Ymm, Xmm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Ymm, Mem) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Zmm, Xmm) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcasti32x2, Vbroadcasti32x2, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcasti32x4, Vbroadcasti32x4, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti32x4, Vbroadcasti32x4, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcasti32x8, Vbroadcasti32x8, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcasti64x2, Vbroadcasti64x2, Ymm, Xmm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti64x2, Vbroadcasti64x2, Ymm, Mem) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_2x(vbroadcasti64x2, Vbroadcasti64x2, Zmm, Xmm) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcasti64x2, Vbroadcasti64x2, Zmm, Mem) // AVX512_DQ{kz}
+ ASMJIT_INST_2x(vbroadcasti64x4, Vbroadcasti64x4, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcasti64x4, Vbroadcasti64x4, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcastsd, Vbroadcastsd, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastsd, Vbroadcastsd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastsd, Vbroadcastsd, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcastsd, Vbroadcastsd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Xmm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vbroadcastss, Vbroadcastss, Zmm, Mem) // AVX512_F{kz}
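+
+ //! The {kz} annotations above mean a form accepts an AVX-512 write-mask {k}
+ //! and zeroing {z}; in asmjit these are applied by chaining `k()` and `z()`
+ //! on the emitter before the call (a sketch, assuming this emitter API):
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitBroadcastSketch(x86::Assembler& a) {
+ //!   a.vbroadcastss(x86::zmm0, x86::xmm1);                // Unmasked broadcast.
+ //!   a.k(x86::k1).z().vbroadcastss(x86::zmm2, x86::xmm1); // {k1}{z} masked form.
+ //! }
+ //! \endcode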
+ ASMJIT_INST_4i(vcmppd, Vcmppd, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vcmppd, Vcmppd, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmppd, Vcmppd, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vcmppd, Vcmppd, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_4i(vcmppd, Vcmppd, KReg, Zmm, Mem, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_4i(vcmpps, Vcmpps, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vcmpps, Vcmpps, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmpps, Vcmpps, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vcmpps, Vcmpps, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vcmpps, Vcmpps, KReg, Zmm, Mem, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vcmpsd, Vcmpsd, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vcmpsd, Vcmpsd, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmpsd, Vcmpsd, KReg, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vcmpsd, Vcmpsd, KReg, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vcmpss, Vcmpss, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vcmpss, Vcmpss, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vcmpss, Vcmpss, KReg, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vcmpss, Vcmpss, KReg, Xmm, Mem, Imm) // AVX512_F{kz|sae}
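+
+ //! Under AVX512 the compare forms above target a KReg instead of a vector,
+ //! which pairs naturally with masked execution. A sketch (the immediate 0 is
+ //! the EQ_OQ predicate):
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitCompareSketch(x86::Assembler& a) {
+ //!   a.vcmpps(x86::k1, x86::zmm0, x86::zmm1, 0);           // k1 = (zmm0 == zmm1).
+ //!   a.k(x86::k1).vaddps(x86::zmm2, x86::zmm0, x86::zmm1); // Add only where equal.
+ //! }
+ //! \endcode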
+ ASMJIT_INST_2x(vcomisd, Vcomisd, Xmm, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcomisd, Vcomisd, Xmm, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcomiss, Vcomiss, Xmm, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcomiss, Vcomiss, Xmm, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vcompresspd, Vcompresspd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vcompressps, Vcompressps, Mem, Zmm) // AVX512_F{kz}
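+
+ //! The compress forms above pack only the lanes selected by the write-mask,
+ //! storing them contiguously; with a memory destination this yields a dense
+ //! stream of the selected elements. A sketch:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitCompressSketch(x86::Assembler& a) {
+ //!   a.k(x86::k1).vcompressps(x86::ptr(x86::rdi), x86::zmm0); // Packed store of selected lanes.
+ //! }
+ //! \endcode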
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Ymm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Zmm, Ymm) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vcvtdq2pd, Vcvtdq2pd, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtdq2ps, Vcvtdq2ps, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Xmm, Xmm, Xmm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Xmm, Xmm, Mem) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Ymm, Ymm, Ymm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Ymm, Ymm, Mem) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Zmm, Zmm, Zmm) // AVX512_BF16{kz|b32}
+ ASMJIT_INST_3x(vcvtne2ps2bf16, Vcvtne2ps2bf16, Zmm, Zmm, Mem) // AVX512_BF16{kz|b32}
+ ASMJIT_INST_2x(vcvtneps2bf16, Vcvtneps2bf16, Xmm, Xmm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtneps2bf16, Vcvtneps2bf16, Xmm, Ymm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtneps2bf16, Vcvtneps2bf16, Xmm, Mem) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtneps2bf16, Vcvtneps2bf16, Ymm, Zmm) // AVX512_BF16{kz|b32}
+ ASMJIT_INST_2x(vcvtneps2bf16, Vcvtneps2bf16, Ymm, Mem) // AVX512_BF16{kz|b32}
+ ASMJIT_INST_2x(vcvtpd2dq, Vcvtpd2dq, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2dq, Vcvtpd2dq, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2dq, Vcvtpd2dq, Xmm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2dq, Vcvtpd2dq, Ymm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2dq, Vcvtpd2dq, Ymm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2ps, Vcvtpd2ps, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2ps, Vcvtpd2ps, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2ps, Vcvtpd2ps, Xmm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2ps, Vcvtpd2ps, Ymm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2ps, Vcvtpd2ps, Ymm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Zmm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2qq, Vcvtpd2qq, Zmm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2udq, Vcvtpd2udq, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2udq, Vcvtpd2udq, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2udq, Vcvtpd2udq, Xmm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2udq, Vcvtpd2udq, Ymm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2udq, Vcvtpd2udq, Ymm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Zmm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtpd2uqq, Vcvtpd2uqq, Zmm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Xmm, Xmm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Xmm, Mem) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Ymm, Xmm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Ymm, Mem) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Zmm, Ymm) // AVX512_F{kz|sae}
+ ASMJIT_INST_2x(vcvtph2ps, Vcvtph2ps, Zmm, Mem) // AVX512_F{kz|sae}
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2dq, Vcvtps2dq, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Ymm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Zmm, Ymm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2pd, Vcvtps2pd, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Xmm, Xmm, Imm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Mem, Xmm, Imm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Xmm, Ymm, Imm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Mem, Ymm, Imm) // F16C AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Ymm, Zmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_3i(vcvtps2ph, Vcvtps2ph, Mem, Zmm, Imm) // AVX512_F{kz|sae}
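+
+ //! The `vcvtps2ph` immediate selects the rounding mode; the value 4 defers to
+ //! MXCSR.RC. A half-precision round-trip sketch over the forms above:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitF16cSketch(x86::Assembler& a) {
+ //!   a.vcvtps2ph(x86::xmm0, x86::ymm1, 4); // 8 floats -> 8 half floats (MXCSR rounding).
+ //!   a.vcvtph2ps(x86::ymm2, x86::xmm0);    // Back to single precision.
+ //! }
+ //! \endcode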
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Xmm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Xmm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Ymm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Ymm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Zmm, Ymm) // AVX512_DQ{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2qq, Vcvtps2qq, Zmm, Mem) // AVX512_DQ{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2udq, Vcvtps2udq, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Xmm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Xmm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Ymm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Ymm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Zmm, Ymm) // AVX512_DQ{kz|er|b32}
+ ASMJIT_INST_2x(vcvtps2uqq, Vcvtps2uqq, Zmm, Mem) // AVX512_DQ{kz|er|b32}
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Zmm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtqq2pd, Vcvtqq2pd, Zmm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtqq2ps, Vcvtqq2ps, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2ps, Vcvtqq2ps, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2ps, Vcvtqq2ps, Xmm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtqq2ps, Vcvtqq2ps, Ymm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtqq2ps, Vcvtqq2ps, Ymm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtsd2si, Vcvtsd2si, Gp, Xmm) // AVX AVX512_F{er}
+ ASMJIT_INST_2x(vcvtsd2si, Vcvtsd2si, Gp, Mem) // AVX AVX512_F{er}
+ ASMJIT_INST_3x(vcvtsd2ss, Vcvtsd2ss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vcvtsd2ss, Vcvtsd2ss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_2x(vcvtsd2usi, Vcvtsd2usi, Gp, Xmm) // AVX512_F{er}
+ ASMJIT_INST_2x(vcvtsd2usi, Vcvtsd2usi, Gp, Mem) // AVX512_F{er}
+ ASMJIT_INST_3x(vcvtsi2sd, Vcvtsi2sd, Xmm, Xmm, Gp) // AVX AVX512_F{er}
+ ASMJIT_INST_3x(vcvtsi2sd, Vcvtsi2sd, Xmm, Xmm, Mem) // AVX AVX512_F{er}
+ ASMJIT_INST_3x(vcvtsi2ss, Vcvtsi2ss, Xmm, Xmm, Gp) // AVX AVX512_F{er}
+ ASMJIT_INST_3x(vcvtsi2ss, Vcvtsi2ss, Xmm, Xmm, Mem) // AVX AVX512_F{er}
+ ASMJIT_INST_3x(vcvtss2sd, Vcvtss2sd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vcvtss2sd, Vcvtss2sd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_2x(vcvtss2si, Vcvtss2si, Gp, Xmm) // AVX AVX512_F{er}
+ ASMJIT_INST_2x(vcvtss2si, Vcvtss2si, Gp, Mem) // AVX AVX512_F{er}
+ ASMJIT_INST_2x(vcvtss2usi, Vcvtss2usi, Gp, Xmm) // AVX512_F{er}
+ ASMJIT_INST_2x(vcvtss2usi, Vcvtss2usi, Gp, Mem) // AVX512_F{er}
+ ASMJIT_INST_2x(vcvttpd2dq, Vcvttpd2dq, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2dq, Vcvttpd2dq, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2dq, Vcvttpd2dq, Xmm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2dq, Vcvttpd2dq, Ymm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2dq, Vcvttpd2dq, Ymm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Zmm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2qq, Vcvttpd2qq, Zmm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2udq, Vcvttpd2udq, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2udq, Vcvttpd2udq, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2udq, Vcvttpd2udq, Xmm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2udq, Vcvttpd2udq, Ymm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2udq, Vcvttpd2udq, Ymm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Zmm, Zmm) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttpd2uqq, Vcvttpd2uqq, Zmm, Mem) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Zmm, Zmm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2dq, Vcvttps2dq, Zmm, Mem) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Xmm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Xmm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Ymm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Ymm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Zmm, Ymm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2qq, Vcvttps2qq, Zmm, Mem) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Zmm, Zmm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2udq, Vcvttps2udq, Zmm, Mem) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Xmm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Xmm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Ymm, Xmm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Ymm, Mem) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Zmm, Ymm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttps2uqq, Vcvttps2uqq, Zmm, Mem) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_2x(vcvttsd2si, Vcvttsd2si, Gp, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttsd2si, Vcvttsd2si, Gp, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttsd2usi, Vcvttsd2usi, Gp, Xmm) // AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttsd2usi, Vcvttsd2usi, Gp, Mem) // AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttss2si, Vcvttss2si, Gp, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttss2si, Vcvttss2si, Gp, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttss2usi, Vcvttss2usi, Gp, Xmm) // AVX512_F{sae}
+ ASMJIT_INST_2x(vcvttss2usi, Vcvttss2usi, Gp, Mem) // AVX512_F{sae}
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Ymm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Zmm, Ymm) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vcvtudq2pd, Vcvtudq2pd, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtudq2ps, Vcvtudq2ps, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Zmm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtuqq2pd, Vcvtuqq2pd, Zmm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtuqq2ps, Vcvtuqq2ps, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2ps, Vcvtuqq2ps, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2ps, Vcvtuqq2ps, Xmm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_2x(vcvtuqq2ps, Vcvtuqq2ps, Ymm, Zmm) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_2x(vcvtuqq2ps, Vcvtuqq2ps, Ymm, Mem) // AVX512_DQ{kz|er|b64}
+ ASMJIT_INST_3x(vcvtusi2sd, Vcvtusi2sd, Xmm, Xmm, Gp) // AVX512_F{er}
+ ASMJIT_INST_3x(vcvtusi2sd, Vcvtusi2sd, Xmm, Xmm, Mem) // AVX512_F{er}
+ ASMJIT_INST_3x(vcvtusi2ss, Vcvtusi2ss, Xmm, Xmm, Gp) // AVX512_F{er}
+ ASMJIT_INST_3x(vcvtusi2ss, Vcvtusi2ss, Xmm, Xmm, Mem) // AVX512_F{er}
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Xmm, Xmm, Xmm, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Xmm, Xmm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Ymm, Ymm, Ymm, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Ymm, Ymm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Zmm, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vdbpsadbw, Vdbpsadbw, Zmm, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vdivpd, Vdivpd, Zmm, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vdivps, Vdivps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vdivps, Vdivps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vdivps, Vdivps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vdivps, Vdivps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vdivps, Vdivps, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vdivps, Vdivps, Zmm, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vdivsd, Vdivsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vdivsd, Vdivsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vdivss, Vdivss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vdivss, Vdivss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Xmm, Xmm, Xmm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Xmm, Xmm, Mem) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Ymm, Ymm, Ymm) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Ymm, Ymm, Mem) // AVX512_BF16{kz|b32}-VL
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Zmm, Zmm, Zmm) // AVX512_BF16{kz|b32}
+ ASMJIT_INST_3x(vdpbf16ps, Vdpbf16ps, Zmm, Zmm, Mem) // AVX512_BF16{kz|b32}
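+
+ //! The AVX512_BF16 forms pair up: `vcvtne2ps2bf16` packs two float vectors
+ //! into one bfloat16 vector, and `vdpbf16ps` accumulates dot products of
+ //! bf16 pairs into float lanes. A sketch:
+ //!
+ //! \code
+ //! using namespace asmjit;
+ //! void emitBf16Sketch(x86::Assembler& a) {
+ //!   a.vcvtne2ps2bf16(x86::zmm0, x86::zmm1, x86::zmm2); // 32 floats -> 32 bf16 values.
+ //!   a.vdpbf16ps(x86::zmm3, x86::zmm0, x86::zmm0);      // zmm3 += dot(bf16 pair lanes).
+ //! }
+ //! \endcode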
+ ASMJIT_INST_4i(vdppd, Vdppd, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vdppd, Vdppd, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vdpps, Vdpps, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vdpps, Vdpps, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vdpps, Vdpps, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vdpps, Vdpps, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_2x(vexp2pd, Vexp2pd, Zmm, Zmm) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vexp2pd, Vexp2pd, Zmm, Mem) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vexp2ps, Vexp2ps, Zmm, Zmm) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_2x(vexp2ps, Vexp2ps, Zmm, Mem) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vexpandpd, Vexpandpd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vexpandps, Vexpandps, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextractf128, Vextractf128, Xmm, Ymm, Imm) // AVX
+ ASMJIT_INST_3i(vextractf128, Vextractf128, Mem, Ymm, Imm) // AVX
+ ASMJIT_INST_3i(vextractf32x4, Vextractf32x4, Xmm, Ymm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vextractf32x4, Vextractf32x4, Mem, Ymm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vextractf32x4, Vextractf32x4, Xmm, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextractf32x4, Vextractf32x4, Mem, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextractf32x8, Vextractf32x8, Ymm, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextractf32x8, Vextractf32x8, Mem, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextractf64x2, Vextractf64x2, Xmm, Ymm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_3i(vextractf64x2, Vextractf64x2, Mem, Ymm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_3i(vextractf64x2, Vextractf64x2, Xmm, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextractf64x2, Vextractf64x2, Mem, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextractf64x4, Vextractf64x4, Ymm, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextractf64x4, Vextractf64x4, Mem, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextracti128, Vextracti128, Xmm, Ymm, Imm) // AVX2
+ ASMJIT_INST_3i(vextracti128, Vextracti128, Mem, Ymm, Imm) // AVX2
+ ASMJIT_INST_3i(vextracti32x4, Vextracti32x4, Xmm, Ymm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vextracti32x4, Vextracti32x4, Mem, Ymm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vextracti32x4, Vextracti32x4, Xmm, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextracti32x4, Vextracti32x4, Mem, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextracti32x8, Vextracti32x8, Ymm, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextracti32x8, Vextracti32x8, Mem, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextracti64x2, Vextracti64x2, Xmm, Ymm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_3i(vextracti64x2, Vextracti64x2, Mem, Ymm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_3i(vextracti64x2, Vextracti64x2, Xmm, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextracti64x2, Vextracti64x2, Mem, Zmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_3i(vextracti64x4, Vextracti64x4, Ymm, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextracti64x4, Vextracti64x4, Mem, Zmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_3i(vextractps, Vextractps, Gp, Xmm, Imm) // AVX AVX512_F
+ ASMJIT_INST_3i(vextractps, Vextractps, Mem, Xmm, Imm) // AVX AVX512_F
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_4i(vfixupimmpd, Vfixupimmpd, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vfixupimmps, Vfixupimmps, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vfixupimmsd, Vfixupimmsd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vfixupimmsd, Vfixupimmsd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vfixupimmss, Vfixupimmss, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vfixupimmss, Vfixupimmss, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd132pd, Vfmadd132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd132ps, Vfmadd132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd132sd, Vfmadd132sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd132sd, Vfmadd132sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd132ss, Vfmadd132ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd132ss, Vfmadd132ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd213pd, Vfmadd213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd213ps, Vfmadd213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd213sd, Vfmadd213sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd213sd, Vfmadd213sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd213ss, Vfmadd213ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd213ss, Vfmadd213ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd231pd, Vfmadd231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd231ps, Vfmadd231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmadd231sd, Vfmadd231sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd231sd, Vfmadd231sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd231ss, Vfmadd231ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmadd231ss, Vfmadd231ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub132pd, Vfmaddsub132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmaddsub132ps, Vfmaddsub132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub213pd, Vfmaddsub213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmaddsub213ps, Vfmaddsub213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub231pd, Vfmaddsub231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmaddsub231ps, Vfmaddsub231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub132pd, Vfmsub132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub132ps, Vfmsub132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub132sd, Vfmsub132sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub132sd, Vfmsub132sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub132ss, Vfmsub132ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub132ss, Vfmsub132ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub213pd, Vfmsub213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub213ps, Vfmsub213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub213sd, Vfmsub213sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub213sd, Vfmsub213sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub213ss, Vfmsub213ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub213ss, Vfmsub213ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub231pd, Vfmsub231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub231ps, Vfmsub231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsub231sd, Vfmsub231sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub231sd, Vfmsub231sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub231ss, Vfmsub231ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsub231ss, Vfmsub231ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd132pd, Vfmsubadd132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsubadd132ps, Vfmsubadd132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd213pd, Vfmsubadd213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsubadd213ps, Vfmsubadd213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd231pd, Vfmsubadd231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfmsubadd231ps, Vfmsubadd231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd132pd, Vfnmadd132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd132ps, Vfnmadd132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd132sd, Vfnmadd132sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd132sd, Vfnmadd132sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd132ss, Vfnmadd132ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd132ss, Vfnmadd132ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd213pd, Vfnmadd213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd213ps, Vfnmadd213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd213sd, Vfnmadd213sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd213sd, Vfnmadd213sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd213ss, Vfnmadd213ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd213ss, Vfnmadd213ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd231pd, Vfnmadd231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd231ps, Vfnmadd231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmadd231sd, Vfnmadd231sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd231sd, Vfnmadd231sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd231ss, Vfnmadd231ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmadd231ss, Vfnmadd231ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub132pd, Vfnmsub132pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub132ps, Vfnmsub132ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub132sd, Vfnmsub132sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub132sd, Vfnmsub132sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub132ss, Vfnmsub132ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub132ss, Vfnmsub132ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub213pd, Vfnmsub213pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub213ps, Vfnmsub213ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub213sd, Vfnmsub213sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub213sd, Vfnmsub213sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub213ss, Vfnmsub213ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub213ss, Vfnmsub213ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub231pd, Vfnmsub231pd, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Xmm, Xmm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Ymm, Ymm, Ymm) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Ymm, Ymm, Mem) // FMA AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Zmm, Zmm, Zmm) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub231ps, Vfnmsub231ps, Zmm, Zmm, Mem) // FMA AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vfnmsub231sd, Vfnmsub231sd, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub231sd, Vfnmsub231sd, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub231ss, Vfnmsub231ss, Xmm, Xmm, Xmm) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3x(vfnmsub231ss, Vfnmsub231ss, Xmm, Xmm, Mem) // FMA AVX512_F{kz|er}
+ ASMJIT_INST_3i(vfpclasspd, Vfpclasspd, KReg, Xmm, Imm) // AVX512_DQ{k|b64}-VL
+ ASMJIT_INST_3i(vfpclasspd, Vfpclasspd, KReg, Mem, Imm) // AVX512_DQ{k|b64} AVX512_DQ{k|b64}-VL
+ ASMJIT_INST_3i(vfpclasspd, Vfpclasspd, KReg, Ymm, Imm) // AVX512_DQ{k|b64}-VL
+ ASMJIT_INST_3i(vfpclasspd, Vfpclasspd, KReg, Zmm, Imm) // AVX512_DQ{k|b64}
+ ASMJIT_INST_3i(vfpclassps, Vfpclassps, KReg, Xmm, Imm) // AVX512_DQ{k|b32}-VL
+ ASMJIT_INST_3i(vfpclassps, Vfpclassps, KReg, Mem, Imm) // AVX512_DQ{k|b32} AVX512_DQ{k|b32}-VL
+ ASMJIT_INST_3i(vfpclassps, Vfpclassps, KReg, Ymm, Imm) // AVX512_DQ{k|b32}-VL
+ ASMJIT_INST_3i(vfpclassps, Vfpclassps, KReg, Zmm, Imm) // AVX512_DQ{k|b32}
+ ASMJIT_INST_3i(vfpclasssd, Vfpclasssd, KReg, Xmm, Imm) // AVX512_DQ{k}
+ ASMJIT_INST_3i(vfpclasssd, Vfpclasssd, KReg, Mem, Imm) // AVX512_DQ{k}
+ ASMJIT_INST_3i(vfpclassss, Vfpclassss, KReg, Xmm, Imm) // AVX512_DQ{k}
+ ASMJIT_INST_3i(vfpclassss, Vfpclassss, KReg, Mem, Imm) // AVX512_DQ{k}
+ ASMJIT_INST_3x(vgatherdpd, Vgatherdpd, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vgatherdpd, Vgatherdpd, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vgatherdpd, Vgatherdpd, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherdpd, Vgatherdpd, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherdpd, Vgatherdpd, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_3x(vgatherdps, Vgatherdps, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vgatherdps, Vgatherdps, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vgatherdps, Vgatherdps, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherdps, Vgatherdps, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherdps, Vgatherdps, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_1x(vgatherpf0dpd, Vgatherpf0dpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf0dps, Vgatherpf0dps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf0qpd, Vgatherpf0qpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf0qps, Vgatherpf0qps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf1dpd, Vgatherpf1dpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf1dps, Vgatherpf1dps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf1qpd, Vgatherpf1qpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vgatherpf1qps, Vgatherpf1qps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_3x(vgatherqpd, Vgatherqpd, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vgatherqpd, Vgatherqpd, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vgatherqpd, Vgatherqpd, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherqpd, Vgatherqpd, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherqpd, Vgatherqpd, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_3x(vgatherqps, Vgatherqps, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_2x(vgatherqps, Vgatherqps, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherqps, Vgatherqps, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vgatherqps, Vgatherqps, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Zmm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vgetexppd, Vgetexppd, Zmm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Zmm, Zmm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_2x(vgetexpps, Vgetexpps, Zmm, Mem) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3x(vgetexpsd, Vgetexpsd, Xmm, Xmm, Xmm) // AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vgetexpsd, Vgetexpsd, Xmm, Xmm, Mem) // AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vgetexpss, Vgetexpss, Xmm, Xmm, Xmm) // AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vgetexpss, Vgetexpss, Xmm, Xmm, Mem) // AVX512_F{kz|sae}
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3i(vgetmantpd, Vgetmantpd, Zmm, Mem, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3i(vgetmantps, Vgetmantps, Zmm, Mem, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vgetmantsd, Vgetmantsd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vgetmantsd, Vgetmantsd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vgetmantss, Vgetmantss, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vgetmantss, Vgetmantss, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Xmm,Xmm,Xmm,Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Xmm,Xmm,Mem,Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Ymm,Ymm,Ymm,Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Ymm,Ymm,Mem,Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Zmm,Zmm,Zmm,Imm) // AVX512_F{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineinvqb, Vgf2p8affineinvqb,Zmm,Zmm,Mem,Imm) // AVX512_F{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Xmm, Xmm, Xmm, Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Xmm, Xmm, Mem, Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Ymm, Ymm, Ymm, Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Ymm, Ymm, Mem, Imm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz} GFNI
+ ASMJIT_INST_4i(vgf2p8affineqb, Vgf2p8affineqb, Zmm, Zmm, Mem, Imm) // AVX512_F{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Xmm, Xmm, Xmm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Xmm, Xmm, Mem) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Ymm, Ymm, Ymm) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Ymm, Ymm, Mem) // AVX AVX512_VL{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Zmm, Zmm, Zmm) // AVX512_F{kz} GFNI
+ ASMJIT_INST_3x(vgf2p8mulb, Vgf2p8mulb, Zmm, Zmm, Mem) // AVX512_F{kz} GFNI
+ ASMJIT_INST_3x(vhaddpd, Vhaddpd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vhaddpd, Vhaddpd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vhaddpd, Vhaddpd, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vhaddpd, Vhaddpd, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vhaddps, Vhaddps, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vhaddps, Vhaddps, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vhaddps, Vhaddps, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vhaddps, Vhaddps, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vhsubpd, Vhsubpd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vhsubpd, Vhsubpd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vhsubpd, Vhsubpd, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vhsubpd, Vhsubpd, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vhsubps, Vhsubps, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vhsubps, Vhsubps, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vhsubps, Vhsubps, Ymm, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vhsubps, Vhsubps, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_4i(vinsertf128, Vinsertf128, Ymm, Ymm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vinsertf128, Vinsertf128, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vinsertf32x4, Vinsertf32x4, Ymm, Ymm, Xmm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_4i(vinsertf32x4, Vinsertf32x4, Ymm, Ymm, Mem, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_4i(vinsertf32x4, Vinsertf32x4, Zmm, Zmm, Xmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinsertf32x4, Vinsertf32x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinsertf32x8, Vinsertf32x8, Zmm, Zmm, Ymm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinsertf32x8, Vinsertf32x8, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinsertf64x2, Vinsertf64x2, Ymm, Ymm, Xmm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_4i(vinsertf64x2, Vinsertf64x2, Ymm, Ymm, Mem, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_4i(vinsertf64x2, Vinsertf64x2, Zmm, Zmm, Xmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinsertf64x2, Vinsertf64x2, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinsertf64x4, Vinsertf64x4, Zmm, Zmm, Ymm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinsertf64x4, Vinsertf64x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinserti128, Vinserti128, Ymm, Ymm, Xmm, Imm) // AVX2
+ ASMJIT_INST_4i(vinserti128, Vinserti128, Ymm, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_4i(vinserti32x4, Vinserti32x4, Ymm, Ymm, Xmm, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_4i(vinserti32x4, Vinserti32x4, Ymm, Ymm, Mem, Imm) // AVX512_F{kz}-VL
+ ASMJIT_INST_4i(vinserti32x4, Vinserti32x4, Zmm, Zmm, Xmm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinserti32x4, Vinserti32x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinserti32x8, Vinserti32x8, Zmm, Zmm, Ymm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinserti32x8, Vinserti32x8, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinserti64x2, Vinserti64x2, Ymm, Ymm, Xmm, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_4i(vinserti64x2, Vinserti64x2, Ymm, Ymm, Mem, Imm) // AVX512_DQ{kz}-VL
+ ASMJIT_INST_4i(vinserti64x2, Vinserti64x2, Zmm, Zmm, Xmm, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinserti64x2, Vinserti64x2, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz}
+ ASMJIT_INST_4i(vinserti64x4, Vinserti64x4, Zmm, Zmm, Ymm, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinserti64x4, Vinserti64x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vinsertps, Vinsertps, Xmm, Xmm, Xmm, Imm) // AVX AVX512_F
+ ASMJIT_INST_4i(vinsertps, Vinsertps, Xmm, Xmm, Mem, Imm) // AVX AVX512_F
+ ASMJIT_INST_2x(vlddqu, Vlddqu, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vlddqu, Vlddqu, Ymm, Mem) // AVX
+ ASMJIT_INST_1x(vldmxcsr, Vldmxcsr, Mem) // AVX
+ ASMJIT_INST_3x(vmaskmovdqu, Vmaskmovdqu, Xmm, Xmm, DS_ZDI) // AVX [EXPLICIT]
+ ASMJIT_INST_3x(vmaskmovpd, Vmaskmovpd, Mem, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vmaskmovpd, Vmaskmovpd, Mem, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vmaskmovpd, Vmaskmovpd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vmaskmovpd, Vmaskmovpd, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vmaskmovps, Vmaskmovps, Mem, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vmaskmovps, Vmaskmovps, Mem, Ymm, Ymm) // AVX
+ ASMJIT_INST_3x(vmaskmovps, Vmaskmovps, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vmaskmovps, Vmaskmovps, Ymm, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Zmm, Zmm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3x(vmaxpd, Vmaxpd, Zmm, Zmm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Zmm, Zmm, Zmm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3x(vmaxps, Vmaxps, Zmm, Zmm, Mem) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3x(vmaxsd, Vmaxsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vmaxsd, Vmaxsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vmaxss, Vmaxss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vmaxss, Vmaxss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vminpd, Vminpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vminpd, Vminpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vminpd, Vminpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vminpd, Vminpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vminpd, Vminpd, Zmm, Zmm, Zmm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3x(vminpd, Vminpd, Zmm, Zmm, Mem) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3x(vminps, Vminps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vminps, Vminps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vminps, Vminps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vminps, Vminps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vminps, Vminps, Zmm, Zmm, Zmm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3x(vminps, Vminps, Zmm, Zmm, Mem) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3x(vminsd, Vminsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vminsd, Vminsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vminss, Vminss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_3x(vminss, Vminss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|sae}
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Mem, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Mem, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovapd, Vmovapd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Mem, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Mem, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovaps, Vmovaps, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovd, Vmovd, Gp, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovd, Vmovd, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovd, Vmovd, Xmm, Gp) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovd, Vmovd, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovddup, Vmovddup, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Mem, Xmm) // AVX
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Ymm, Mem) // AVX
+ ASMJIT_INST_2x(vmovdqa, Vmovdqa, Mem, Ymm) // AVX
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa32, Vmovdqa32, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqa64, Vmovdqa64, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Mem, Xmm) // AVX
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Ymm, Mem) // AVX
+ ASMJIT_INST_2x(vmovdqu, Vmovdqu, Mem, Ymm) // AVX
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Mem, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Mem, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vmovdqu16, Vmovdqu16, Mem, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu32, Vmovdqu32, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu64, Vmovdqu64, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Mem, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Mem, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vmovdqu8, Vmovdqu8, Mem, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vmovhlps, Vmovhlps, Xmm, Xmm, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovhpd, Vmovhpd, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_3x(vmovhpd, Vmovhpd, Xmm, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovhps, Vmovhps, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_3x(vmovhps, Vmovhps, Xmm, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_3x(vmovlhps, Vmovlhps, Xmm, Xmm, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovlpd, Vmovlpd, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_3x(vmovlpd, Vmovlpd, Xmm, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovlps, Vmovlps, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_3x(vmovlps, Vmovlps, Xmm, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovmskpd, Vmovmskpd, Gp, Xmm) // AVX
+ ASMJIT_INST_2x(vmovmskpd, Vmovmskpd, Gp, Ymm) // AVX
+ ASMJIT_INST_2x(vmovmskps, Vmovmskps, Gp, Xmm) // AVX
+ ASMJIT_INST_2x(vmovmskps, Vmovmskps, Gp, Ymm) // AVX
+ ASMJIT_INST_2x(vmovntdq, Vmovntdq, Mem, Xmm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntdq, Vmovntdq, Mem, Ymm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntdq, Vmovntdq, Mem, Zmm) // AVX512_F
+ ASMJIT_INST_2x(vmovntdqa, Vmovntdqa, Xmm, Mem) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntdqa, Vmovntdqa, Ymm, Mem) // AVX2 AVX512_F-VL
+ ASMJIT_INST_2x(vmovntdqa, Vmovntdqa, Zmm, Mem) // AVX512_F
+ ASMJIT_INST_2x(vmovntpd, Vmovntpd, Mem, Xmm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntpd, Vmovntpd, Mem, Ymm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntpd, Vmovntpd, Mem, Zmm) // AVX512_F
+ ASMJIT_INST_2x(vmovntps, Vmovntps, Mem, Xmm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntps, Vmovntps, Mem, Ymm) // AVX AVX512_F-VL
+ ASMJIT_INST_2x(vmovntps, Vmovntps, Mem, Zmm) // AVX512_F
+ ASMJIT_INST_2x(vmovq, Vmovq, Gp, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovq, Vmovq, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovq, Vmovq, Xmm, Mem) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovq, Vmovq, Xmm, Gp) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovq, Vmovq, Xmm, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovsd, Vmovsd, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovsd, Vmovsd, Xmm, Mem) // AVX AVX512_F{kz}
+ ASMJIT_INST_3x(vmovsd, Vmovsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovshdup, Vmovshdup, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovsldup, Vmovsldup, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovss, Vmovss, Mem, Xmm) // AVX AVX512_F
+ ASMJIT_INST_2x(vmovss, Vmovss, Xmm, Mem) // AVX AVX512_F{kz}
+ ASMJIT_INST_3x(vmovss, Vmovss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Mem, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Mem, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovupd, Vmovupd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovups, Vmovups, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Mem, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Ymm, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Ymm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Mem, Ymm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vmovups, Vmovups, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovups, Vmovups, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vmovups, Vmovups, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_4i(vmpsadbw, Vmpsadbw, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vmpsadbw, Vmpsadbw, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vmpsadbw, Vmpsadbw, Ymm, Ymm, Ymm, Imm) // AVX2
+ ASMJIT_INST_4i(vmpsadbw, Vmpsadbw, Ymm, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vmulpd, Vmulpd, Zmm, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vmulps, Vmulps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmulps, Vmulps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmulps, Vmulps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmulps, Vmulps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vmulps, Vmulps, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vmulps, Vmulps, Zmm, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vmulsd, Vmulsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vmulsd, Vmulsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vmulss, Vmulss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vmulss, Vmulss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vorpd, Vorpd, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vorpd, Vorpd, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vorpd, Vorpd, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vorpd, Vorpd, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vorpd, Vorpd, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vorpd, Vorpd, Zmm, Zmm, Mem) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vorps, Vorps, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vorps, Vorps, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vorps, Vorps, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vorps, Vorps, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vorps, Vorps, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vorps, Vorps, Zmm, Zmm, Mem) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_6x(vp4dpwssd, Vp4dpwssd, Zmm, Zmm, Zmm, Zmm, Zmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_6x(vp4dpwssds, Vp4dpwssds, Zmm, Zmm, Zmm, Zmm, Zmm, Mem) // AVX512_4FMAPS{kz}
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpabsb, Vpabsb, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Ymm, Ymm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpabsd, Vpabsd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpabsq, Vpabsq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpabsw, Vpabsw, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Zmm, Zmm, Zmm) // AVX512_BW{kz|b32}
+ ASMJIT_INST_3x(vpackssdw, Vpackssdw, Zmm, Zmm, Mem) // AVX512_BW{kz|b32}
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpacksswb, Vpacksswb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz|b32}-VL
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Zmm, Zmm, Zmm) // AVX512_BW{kz|b32}
+ ASMJIT_INST_3x(vpackusdw, Vpackusdw, Zmm, Zmm, Mem) // AVX512_BW{kz|b32}
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpackuswb, Vpackuswb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddb, Vpaddb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpaddd, Vpaddd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpaddq, Vpaddq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddsb, Vpaddsb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddsw, Vpaddsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddusb, Vpaddusb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddusw, Vpaddusw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpaddw, Vpaddw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Xmm, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Xmm, Xmm, Mem, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Ymm, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Ymm, Ymm, Mem, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Zmm, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vpalignr, Vpalignr, Zmm, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpand, Vpand, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpand, Vpand, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpand, Vpand, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpand, Vpand, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpandd, Vpandd, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandd, Vpandd, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandd, Vpandd, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandd, Vpandd, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandd, Vpandd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpandd, Vpandd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpandn, Vpandn, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpandn, Vpandn, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpandn, Vpandn, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpandn, Vpandn, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpandnd, Vpandnd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpandnq, Vpandnq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpandq, Vpandq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandq, Vpandq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandq, Vpandq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandq, Vpandq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpandq, Vpandq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpandq, Vpandq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpavgb, Vpavgb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpavgw, Vpavgw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vpblendd, Vpblendd, Xmm, Xmm, Xmm, Imm) // AVX2
+ ASMJIT_INST_4i(vpblendd, Vpblendd, Xmm, Xmm, Mem, Imm) // AVX2
+ ASMJIT_INST_4i(vpblendd, Vpblendd, Ymm, Ymm, Ymm, Imm) // AVX2
+ ASMJIT_INST_4i(vpblendd, Vpblendd, Ymm, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_4x(vpblendvb, Vpblendvb, Xmm, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_4x(vpblendvb, Vpblendvb, Xmm, Xmm, Mem, Xmm) // AVX
+ ASMJIT_INST_4x(vpblendvb, Vpblendvb, Ymm, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_4x(vpblendvb, Vpblendvb, Ymm, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_4i(vpblendw, Vpblendw, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vpblendw, Vpblendw, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vpblendw, Vpblendw, Ymm, Ymm, Ymm, Imm) // AVX2
+ ASMJIT_INST_4i(vpblendw, Vpblendw, Ymm, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Xmm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Xmm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Xmm, Gp) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Ymm, Gp) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Zmm, Gp) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Zmm, Xmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpbroadcastb, Vpbroadcastb, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Xmm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Xmm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Xmm, Gp) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Ymm, Gp) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Zmm, Gp) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastd, Vpbroadcastd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastmb2q, Vpbroadcastmb2q, Xmm, KReg) // AVX512_CD-VL
+ ASMJIT_INST_2x(vpbroadcastmb2q, Vpbroadcastmb2q, Ymm, KReg) // AVX512_CD-VL
+ ASMJIT_INST_2x(vpbroadcastmb2q, Vpbroadcastmb2q, Zmm, KReg) // AVX512_CD
+ ASMJIT_INST_2x(vpbroadcastmw2d, Vpbroadcastmw2d, Xmm, KReg) // AVX512_CD-VL
+ ASMJIT_INST_2x(vpbroadcastmw2d, Vpbroadcastmw2d, Ymm, KReg) // AVX512_CD-VL
+ ASMJIT_INST_2x(vpbroadcastmw2d, Vpbroadcastmw2d, Zmm, KReg) // AVX512_CD
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Xmm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Xmm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Xmm, Gp) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Ymm, Gp) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Zmm, Gp) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastq, Vpbroadcastq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Xmm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Xmm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Xmm, Gp) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Ymm, Gp) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Zmm, Gp) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Zmm, Xmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpbroadcastw, Vpbroadcastw, Zmm, Mem) // AVX512_BW{kz}
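+ // Usage sketch (illustration only, not part of this header's interface):
+ // overload resolution on the operand types above picks the AVX2 or AVX-512
+ // encoding, so the same mnemonic covers both. Assuming `x86::Assembler a`:
+ //
+ //   a.vpbroadcastd(x86::ymm0, x86::xmm1);               // AVX2 form.
+ //   a.k(x86::k1).z().vpbroadcastd(x86::zmm0, x86::eax); // AVX512_F{kz}, Gp source.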
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Xmm, Xmm, Xmm, Imm) // AVX AVX512_F-VL
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Xmm, Xmm, Mem, Imm) // AVX AVX512_F-VL
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Ymm, Ymm, Ymm, Imm) // AVX512_F-VL VPCLMULQDQ
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Ymm, Ymm, Mem, Imm) // AVX512_F-VL VPCLMULQDQ
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Zmm, Zmm, Zmm, Imm) // AVX512_F VPCLMULQDQ
+ ASMJIT_INST_4i(vpclmulqdq, Vpclmulqdq, Zmm, Zmm, Mem, Imm) // AVX512_F VPCLMULQDQ
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Xmm, Xmm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Xmm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Ymm, Ymm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Ymm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Zmm, Zmm, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpb, Vpcmpb, KReg, Zmm, Mem, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Xmm, Xmm, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Xmm, Mem, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Ymm, Ymm, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Ymm, Mem, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Zmm, Zmm, Imm) // AVX512_F{k|b32}
+ ASMJIT_INST_4i(vpcmpd, Vpcmpd, KReg, Zmm, Mem, Imm) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpeqb, Vpcmpeqb, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Xmm, Xmm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Xmm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Ymm, Ymm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Ymm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Zmm, Zmm) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vpcmpeqd, Vpcmpeqd, KReg, Zmm, Mem) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Xmm, Xmm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Xmm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Ymm, Ymm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Ymm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Zmm, Zmm) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vpcmpeqq, Vpcmpeqq, KReg, Zmm, Mem) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpeqw, Vpcmpeqw, KReg, Zmm, Mem) // AVX512_BW{k}
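+ // Comparison sketch (illustration only): the AVX/AVX2 forms of vpcmpeq* and
+ // vpcmpgt* return a vector of all-ones/all-zeros lanes, whereas the AVX-512
+ // forms write a KReg predicate instead. Assuming `x86::Assembler a`:
+ //
+ //   a.vpcmpeqd(x86::ymm0, x86::ymm1, x86::ymm2); // AVX2: lane mask in ymm0.
+ //   a.vpcmpeqd(x86::k1, x86::zmm1, x86::zmm2);   // AVX512_F: predicate in k1.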
+ ASMJIT_INST_6x(vpcmpestri, Vpcmpestri, Xmm, Xmm, Imm, ECX, EAX, EDX) // AVX [EXPLICIT]
+ ASMJIT_INST_6x(vpcmpestri, Vpcmpestri, Xmm, Mem, Imm, ECX, EAX, EDX) // AVX [EXPLICIT]
+ ASMJIT_INST_6x(vpcmpestrm, Vpcmpestrm, Xmm, Xmm, Imm, XMM0, EAX, EDX) // AVX [EXPLICIT]
+ ASMJIT_INST_6x(vpcmpestrm, Vpcmpestrm, Xmm, Mem, Imm, XMM0, EAX, EDX) // AVX [EXPLICIT]
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpgtb, Vpcmpgtb, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Xmm, Xmm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Xmm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Ymm, Ymm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Ymm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Zmm, Zmm) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vpcmpgtd, Vpcmpgtd, KReg, Zmm, Mem) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Xmm, Xmm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Xmm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Ymm, Ymm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Ymm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Zmm, Zmm) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vpcmpgtq, Vpcmpgtq, KReg, Zmm, Mem) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpcmpgtw, Vpcmpgtw, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_4x(vpcmpistri, Vpcmpistri, Xmm, Xmm, Imm, ECX) // AVX [EXPLICIT]
+ ASMJIT_INST_4x(vpcmpistri, Vpcmpistri, Xmm, Mem, Imm, ECX) // AVX [EXPLICIT]
+ ASMJIT_INST_4x(vpcmpistrm, Vpcmpistrm, Xmm, Xmm, Imm, XMM0) // AVX [EXPLICIT]
+ ASMJIT_INST_4x(vpcmpistrm, Vpcmpistrm, Xmm, Mem, Imm, XMM0) // AVX [EXPLICIT]
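+ // Note on [EXPLICIT] (illustration only): these string-compare instructions
+ // use architecturally fixed registers (EAX/EDX lengths, ECX or XMM0 result),
+ // and the overloads above take them as explicit operands so register
+ // allocators can see them. A hedged sketch, assuming `x86::Assembler a`:
+ //
+ //   a.vpcmpistri(x86::xmm0, x86::xmm1, asmjit::Imm(0x0C), x86::ecx); // index lands in ECX.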
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Xmm, Xmm, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Xmm, Mem, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Ymm, Ymm, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Ymm, Mem, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Zmm, Zmm, Imm) // AVX512_F{k|b64}
+ ASMJIT_INST_4i(vpcmpq, Vpcmpq, KReg, Zmm, Mem, Imm) // AVX512_F{k|b64}
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Xmm, Xmm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Xmm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Ymm, Ymm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Ymm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Zmm, Zmm, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpub, Vpcmpub, KReg, Zmm, Mem, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Xmm, Xmm, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Xmm, Mem, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Ymm, Ymm, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Ymm, Mem, Imm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Zmm, Zmm, Imm) // AVX512_F{k|b32}
+ ASMJIT_INST_4i(vpcmpud, Vpcmpud, KReg, Zmm, Mem, Imm) // AVX512_F{k|b32}
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Xmm, Xmm, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Xmm, Mem, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Ymm, Ymm, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Ymm, Mem, Imm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Zmm, Zmm, Imm) // AVX512_F{k|b64}
+ ASMJIT_INST_4i(vpcmpuq, Vpcmpuq, KReg, Zmm, Mem, Imm) // AVX512_F{k|b64}
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Xmm, Xmm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Xmm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Ymm, Ymm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Ymm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Zmm, Zmm, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpuw, Vpcmpuw, KReg, Zmm, Mem, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Xmm, Xmm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Xmm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Ymm, Ymm, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Ymm, Mem, Imm) // AVX512_BW{k}-VL
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Zmm, Zmm, Imm) // AVX512_BW{k}
+ ASMJIT_INST_4i(vpcmpw, Vpcmpw, KReg, Zmm, Mem, Imm) // AVX512_BW{k}
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Mem, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Mem, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpcompressb, Vpcompressb, Mem, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpcompressd, Vpcompressd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpcompressq, Vpcompressq, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Mem, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Mem, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpcompressw, Vpcompressw, Mem, Zmm) // AVX512_VBMI2{kz}
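+ // Compress/expand sketch (illustration only): these are typically used with
+ // a write-mask and a memory operand to pack or unpack selected lanes.
+ // Assuming `x86::Assembler a` and the lane selector already in k1:
+ //
+ //   a.k(x86::k1).vpcompressd(x86::ptr(x86::rdi), x86::zmm0);   // store selected dwords.
+ //   a.k(x86::k1).z().vpexpandd(x86::zmm1, x86::ptr(x86::rsi)); // load into selected lanes.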
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Xmm, Xmm) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Xmm, Mem) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Ymm, Ymm) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Ymm, Mem) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Zmm, Zmm) // AVX512_CD{kz|b32}
+ ASMJIT_INST_2x(vpconflictd, Vpconflictd, Zmm, Mem) // AVX512_CD{kz|b32}
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Xmm, Xmm) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Xmm, Mem) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Ymm, Ymm) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Ymm, Mem) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Zmm, Zmm) // AVX512_CD{kz|b64}
+ ASMJIT_INST_2x(vpconflictq, Vpconflictq, Zmm, Mem) // AVX512_CD{kz|b64}
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Xmm, Xmm, Xmm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Xmm, Xmm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Ymm, Ymm, Ymm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Ymm, Ymm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Zmm, Zmm, Zmm) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpbusd, Vpdpbusd, Zmm, Zmm, Mem) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Xmm, Xmm, Xmm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Xmm, Xmm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Ymm, Ymm, Ymm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Ymm, Ymm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Zmm, Zmm, Zmm) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpbusds, Vpdpbusds, Zmm, Zmm, Mem) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Xmm, Xmm, Xmm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Xmm, Xmm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Ymm, Ymm, Ymm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Ymm, Ymm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Zmm, Zmm, Zmm) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpwssd, Vpdpwssd, Zmm, Zmm, Mem) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Xmm, Xmm, Xmm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Xmm, Xmm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Ymm, Ymm, Ymm) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Ymm, Ymm, Mem) // AVX512_VNNI{kz|b32}-VL
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Zmm, Zmm, Zmm) // AVX512_VNNI{kz|b32}
+ ASMJIT_INST_3x(vpdpwssds, Vpdpwssds, Zmm, Zmm, Mem) // AVX512_VNNI{kz|b32}
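+ // VNNI sketch (illustration only): vpdpbusd accumulates four u8*s8 products
+ // into each dword lane, the building block of int8 dot-product kernels:
+ //
+ //   a.vpdpbusd(x86::zmm0, x86::zmm1, x86::zmm2); // zmm0 += dot4(u8(zmm1), s8(zmm2)).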
+ ASMJIT_INST_4i(vperm2f128, Vperm2f128, Ymm, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_4i(vperm2f128, Vperm2f128, Ymm, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vperm2i128, Vperm2i128, Ymm, Ymm, Ymm, Imm) // AVX2
+ ASMJIT_INST_4i(vperm2i128, Vperm2i128, Ymm, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_3x(vpermb, Vpermb, Xmm, Xmm, Xmm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermb, Vpermb, Xmm, Xmm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermb, Vpermb, Ymm, Ymm, Ymm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermb, Vpermb, Ymm, Ymm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermb, Vpermb, Zmm, Zmm, Zmm) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermb, Vpermb, Zmm, Zmm, Mem) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermd, Vpermd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermd, Vpermd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermd, Vpermd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermd, Vpermd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Xmm, Xmm, Xmm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Xmm, Xmm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Ymm, Ymm, Ymm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Ymm, Ymm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Zmm, Zmm, Zmm) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermi2b, Vpermi2b, Zmm, Zmm, Mem) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermi2d, Vpermi2d, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermi2pd, Vpermi2pd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermi2ps, Vpermi2ps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermi2q, Vpermi2q, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpermi2w, Vpermi2w, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Xmm, Mem, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Ymm, Ymm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Ymm, Mem, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermilpd, Vpermilpd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpermilpd, Vpermilpd, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Xmm, Mem, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Ymm, Ymm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Ymm, Mem, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermilps, Vpermilps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpermilps, Vpermilps, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpermpd, Vpermpd, Ymm, Ymm, Imm) // AVX2
+ ASMJIT_INST_3i(vpermpd, Vpermpd, Ymm, Mem, Imm) // AVX2
+ ASMJIT_INST_3x(vpermps, Vpermps, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpermps, Vpermps, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3i(vpermq, Vpermq, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpermq, Vpermq, Ymm, Mem, Imm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermq, Vpermq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermq, Vpermq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermq, Vpermq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermq, Vpermq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpermq, Vpermq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpermq, Vpermq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Xmm, Xmm, Xmm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Xmm, Xmm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Ymm, Ymm, Ymm) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Ymm, Ymm, Mem) // AVX512_VBMI{kz}-VL
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Zmm, Zmm, Zmm) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermt2b, Vpermt2b, Zmm, Zmm, Mem) // AVX512_VBMI{kz}
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermt2d, Vpermt2d, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermt2pd, Vpermt2pd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermt2ps, Vpermt2ps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermt2q, Vpermt2q, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpermt2w, Vpermt2w, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpermw, Vpermw, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermw, Vpermw, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermw, Vpermw, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermw, Vpermw, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpermw, Vpermw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpermw, Vpermw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpexpandb, Vpexpandb, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpexpandd, Vpexpandd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Ymm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Zmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpexpandq, Vpexpandq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_2x(vpexpandw, Vpexpandw, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3i(vpextrb, Vpextrb, Gp, Xmm, Imm) // AVX AVX512_BW
+ ASMJIT_INST_3i(vpextrb, Vpextrb, Mem, Xmm, Imm) // AVX AVX512_BW
+ ASMJIT_INST_3i(vpextrd, Vpextrd, Gp, Xmm, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_3i(vpextrd, Vpextrd, Mem, Xmm, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_3i(vpextrq, Vpextrq, Gp, Xmm, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_3i(vpextrq, Vpextrq, Mem, Xmm, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_3i(vpextrw, Vpextrw, Gp, Xmm, Imm) // AVX AVX512_BW
+ ASMJIT_INST_3i(vpextrw, Vpextrw, Mem, Xmm, Imm) // AVX AVX512_BW
+ ASMJIT_INST_3x(vpgatherdd, Vpgatherdd, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vpgatherdd, Vpgatherdd, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vpgatherdd, Vpgatherdd, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherdd, Vpgatherdd, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherdd, Vpgatherdd, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_3x(vpgatherdq, Vpgatherdq, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vpgatherdq, Vpgatherdq, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vpgatherdq, Vpgatherdq, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherdq, Vpgatherdq, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherdq, Vpgatherdq, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_3x(vpgatherqd, Vpgatherqd, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_2x(vpgatherqd, Vpgatherqd, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherqd, Vpgatherqd, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherqd, Vpgatherqd, Zmm, Mem) // AVX512_F{k}
+ ASMJIT_INST_3x(vpgatherqq, Vpgatherqq, Xmm, Mem, Xmm) // AVX2
+ ASMJIT_INST_3x(vpgatherqq, Vpgatherqq, Ymm, Mem, Ymm) // AVX2
+ ASMJIT_INST_2x(vpgatherqq, Vpgatherqq, Xmm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherqq, Vpgatherqq, Ymm, Mem) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpgatherqq, Vpgatherqq, Zmm, Mem) // AVX512_F{k}
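+ // Gather sketch (illustration only): the AVX2 forms take the write-mask as a
+ // trailing vector operand, while the AVX-512 forms use a {k} register that
+ // the instruction consumes (clears) as elements complete:
+ //
+ //   a.vpgatherdd(x86::ymm0, x86::ptr(x86::rax, x86::ymm1, 2), x86::ymm2); // AVX2.
+ //   a.k(x86::k1).vpgatherdd(x86::zmm0, x86::ptr(x86::rax, x86::zmm1, 2)); // AVX512_F{k}.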
+ ASMJIT_INST_3x(vphaddd, Vphaddd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphaddd, Vphaddd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphaddd, Vphaddd, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphaddd, Vphaddd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vphaddsw, Vphaddsw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphaddsw, Vphaddsw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphaddsw, Vphaddsw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphaddsw, Vphaddsw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vphaddw, Vphaddw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphaddw, Vphaddw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphaddw, Vphaddw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphaddw, Vphaddw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_2x(vphminposuw, Vphminposuw, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vphminposuw, Vphminposuw, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphsubd, Vphsubd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphsubd, Vphsubd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphsubd, Vphsubd, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphsubd, Vphsubd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vphsubsw, Vphsubsw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphsubsw, Vphsubsw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphsubsw, Vphsubsw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphsubsw, Vphsubsw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vphsubw, Vphsubw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vphsubw, Vphsubw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vphsubw, Vphsubw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vphsubw, Vphsubw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_4i(vpinsrb, Vpinsrb, Xmm, Xmm, Gp, Imm) // AVX AVX512_BW
+ ASMJIT_INST_4i(vpinsrb, Vpinsrb, Xmm, Xmm, Mem, Imm) // AVX AVX512_BW
+ ASMJIT_INST_4i(vpinsrd, Vpinsrd, Xmm, Xmm, Gp, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_4i(vpinsrd, Vpinsrd, Xmm, Xmm, Mem, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_4i(vpinsrq, Vpinsrq, Xmm, Xmm, Gp, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_4i(vpinsrq, Vpinsrq, Xmm, Xmm, Mem, Imm) // AVX AVX512_DQ
+ ASMJIT_INST_4i(vpinsrw, Vpinsrw, Xmm, Xmm, Gp, Imm) // AVX AVX512_BW
+ ASMJIT_INST_4i(vpinsrw, Vpinsrw, Xmm, Xmm, Mem, Imm) // AVX AVX512_BW
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Xmm, Xmm) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Xmm, Mem) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Ymm, Ymm) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Ymm, Mem) // AVX512_CD{kz|b32}-VL
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Zmm, Zmm) // AVX512_CD{kz|b32}
+ ASMJIT_INST_2x(vplzcntd, Vplzcntd, Zmm, Mem) // AVX512_CD{kz|b32}
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Xmm, Xmm) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Xmm, Mem) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Ymm, Ymm) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Ymm, Mem) // AVX512_CD{kz|b64}-VL
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Zmm, Zmm) // AVX512_CD{kz|b64}
+ ASMJIT_INST_2x(vplzcntq, Vplzcntq, Zmm, Mem) // AVX512_CD{kz|b64}
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Xmm, Xmm, Xmm) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Xmm, Xmm, Mem) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Ymm, Ymm, Ymm) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Ymm, Ymm, Mem) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Zmm, Zmm, Zmm) // AVX512_IFMA{kz|b64}
+ ASMJIT_INST_3x(vpmadd52huq, Vpmadd52huq, Zmm, Zmm, Mem) // AVX512_IFMA{kz|b64}
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Xmm, Xmm, Xmm) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Xmm, Xmm, Mem) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Ymm, Ymm, Ymm) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Ymm, Ymm, Mem) // AVX512_IFMA{kz|b64}-VL
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Zmm, Zmm, Zmm) // AVX512_IFMA{kz|b64}
+ ASMJIT_INST_3x(vpmadd52luq, Vpmadd52luq, Zmm, Zmm, Mem) // AVX512_IFMA{kz|b64}
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaddubsw, Vpmaddubsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaddwd, Vpmaddwd, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaskmovd, Vpmaskmovd, Mem, Xmm, Xmm) // AVX2
+ ASMJIT_INST_3x(vpmaskmovd, Vpmaskmovd, Mem, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpmaskmovd, Vpmaskmovd, Xmm, Xmm, Mem) // AVX2
+ ASMJIT_INST_3x(vpmaskmovd, Vpmaskmovd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpmaskmovq, Vpmaskmovq, Mem, Xmm, Xmm) // AVX2
+ ASMJIT_INST_3x(vpmaskmovq, Vpmaskmovq, Mem, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpmaskmovq, Vpmaskmovq, Xmm, Xmm, Mem) // AVX2
+ ASMJIT_INST_3x(vpmaskmovq, Vpmaskmovq, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxsb, Vpmaxsb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmaxsd, Vpmaxsd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmaxsq, Vpmaxsq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxsw, Vpmaxsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxub, Vpmaxub, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmaxud, Vpmaxud, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmaxuq, Vpmaxuq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmaxuw, Vpmaxuw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminsb, Vpminsb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpminsd, Vpminsd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpminsq, Vpminsq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminsw, Vpminsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminub, Vpminub, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminub, Vpminub, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminub, Vpminub, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminub, Vpminub, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminub, Vpminub, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminub, Vpminub, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminud, Vpminud, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminud, Vpminud, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminud, Vpminud, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminud, Vpminud, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpminud, Vpminud, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpminud, Vpminud, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpminuq, Vpminuq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpminuw, Vpminuw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovb2m, Vpmovb2m, KReg, Xmm) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovb2m, Vpmovb2m, KReg, Ymm) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovb2m, Vpmovb2m, KReg, Zmm) // AVX512_BW
+ ASMJIT_INST_2x(vpmovd2m, Vpmovd2m, KReg, Xmm) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovd2m, Vpmovd2m, KReg, Ymm) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovd2m, Vpmovd2m, KReg, Zmm) // AVX512_DQ
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovdb, Vpmovdb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovdw, Vpmovdw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovm2b, Vpmovm2b, Xmm, KReg) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovm2b, Vpmovm2b, Ymm, KReg) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovm2b, Vpmovm2b, Zmm, KReg) // AVX512_BW
+ ASMJIT_INST_2x(vpmovm2d, Vpmovm2d, Xmm, KReg) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovm2d, Vpmovm2d, Ymm, KReg) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovm2d, Vpmovm2d, Zmm, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(vpmovm2q, Vpmovm2q, Xmm, KReg) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovm2q, Vpmovm2q, Ymm, KReg) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovm2q, Vpmovm2q, Zmm, KReg) // AVX512_DQ
+ ASMJIT_INST_2x(vpmovm2w, Vpmovm2w, Xmm, KReg) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovm2w, Vpmovm2w, Ymm, KReg) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovm2w, Vpmovm2w, Zmm, KReg) // AVX512_BW
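+ // Mask conversion sketch (illustration only): vpmov*2m and vpmovm2* move
+ // per-lane predicates between vector sign bits and K registers:
+ //
+ //   a.vpmovb2m(x86::k1, x86::zmm0); // byte sign bits -> k1.
+ //   a.vpmovm2b(x86::zmm1, x86::k1); // k1 -> 0x00/0xFF bytes.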
+ ASMJIT_INST_2x(vpmovmskb, Vpmovmskb, Gp, Xmm) // AVX
+ ASMJIT_INST_2x(vpmovmskb, Vpmovmskb, Gp, Ymm) // AVX2
+ ASMJIT_INST_2x(vpmovq2m, Vpmovq2m, KReg, Xmm) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovq2m, Vpmovq2m, KReg, Ymm) // AVX512_DQ-VL
+ ASMJIT_INST_2x(vpmovq2m, Vpmovq2m, KReg, Zmm) // AVX512_DQ
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovqb, Vpmovqb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovqd, Vpmovqd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovqw, Vpmovqw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsdb, Vpmovsdb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsdw, Vpmovsdw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqb, Vpmovsqb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqd, Vpmovsqd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsqw, Vpmovsqw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Mem, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Xmm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Mem, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Ymm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovswb, Vpmovswb, Mem, Zmm) // AVX512_BW{kz}
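+ // Down-conversion sketch (illustration only): vpmov{d,q,w}* truncate,
+ // vpmovs* saturate signed, and vpmovus* saturate unsigned; each form also
+ // accepts a memory destination for narrowing stores:
+ //
+ //   a.k(x86::k2).vpmovsdw(x86::ptr(x86::rsi), x86::zmm3); // saturating dword -> word store.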
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxbd, Vpmovsxbd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxbq, Vpmovsxbq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Zmm, Ymm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovsxbw, Vpmovsxbw, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Zmm, Ymm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxdq, Vpmovsxdq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Zmm, Ymm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxwd, Vpmovsxwd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovsxwq, Vpmovsxwq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusdb, Vpmovusdb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusdw, Vpmovusdw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqb, Vpmovusqb, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Ymm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqd, Vpmovusqd, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Mem, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Xmm, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Mem, Ymm) // AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Xmm, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovusqw, Vpmovusqw, Mem, Zmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Mem, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Xmm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Mem, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Ymm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovuswb, Vpmovuswb, Mem, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovw2m, Vpmovw2m, KReg, Xmm) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovw2m, Vpmovw2m, KReg, Ymm) // AVX512_BW-VL
+ ASMJIT_INST_2x(vpmovw2m, Vpmovw2m, KReg, Zmm) // AVX512_BW
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Mem, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Xmm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Mem, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Ymm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovwb, Vpmovwb, Mem, Zmm) // AVX512_BW{kz}
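+ // The vpmov[s|us]* down-converting forms above take Mem as the *destination*,
+ // so a truncating/saturating store can be emitted directly, under a write
+ // mask. A minimal, illustrative sketch (assuming an initialized
+ // x86::Assembler `a`, AVX512_BW support, and a byte buffer pointer in rdi):
+ //
+ //   a.k(x86::k1).vpmovwb(x86::ptr(x86::rdi), x86::zmm0); // 32 words -> 32 bytes, gated by k1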
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxbd, Vpmovzxbd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxbq, Vpmovzxbq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Zmm, Ymm) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovzxbw, Vpmovzxbw, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Zmm, Ymm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxdq, Vpmovzxdq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Zmm, Ymm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxwd, Vpmovzxwd, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_2x(vpmovzxwq, Vpmovzxwq, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmuldq, Vpmuldq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulhrsw, Vpmulhrsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulhuw, Vpmulhuw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulhw, Vpmulhw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmulld, Vpmulld, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Xmm, Xmm, Xmm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Xmm, Xmm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Ymm, Ymm, Ymm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Ymm, Ymm, Mem) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vpmullq, Vpmullq, Zmm, Zmm, Mem) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmullw, Vpmullw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Xmm, Xmm, Xmm) // AVX512_VBMI{kz|b64}-VL
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Xmm, Xmm, Mem) // AVX512_VBMI{kz|b64}-VL
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Ymm, Ymm, Ymm) // AVX512_VBMI{kz|b64}-VL
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Ymm, Ymm, Mem) // AVX512_VBMI{kz|b64}-VL
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Zmm, Zmm, Zmm) // AVX512_VBMI{kz|b64}
+ ASMJIT_INST_3x(vpmultishiftqb, Vpmultishiftqb, Zmm, Zmm, Mem) // AVX512_VBMI{kz|b64}
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpmuludq, Vpmuludq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Xmm, Xmm) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Xmm, Mem) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Ymm, Ymm) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Ymm, Mem) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Zmm, Zmm) // AVX512_BITALG{kz}
+ ASMJIT_INST_2x(vpopcntb, Vpopcntb, Zmm, Mem) // AVX512_BITALG{kz}
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Xmm, Xmm) // AVX512_VPOPCNTDQ{kz|b32}-VL
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Xmm, Mem) // AVX512_VPOPCNTDQ{kz|b32}-VL
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Ymm, Ymm) // AVX512_VPOPCNTDQ{kz|b32}-VL
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Ymm, Mem) // AVX512_VPOPCNTDQ{kz|b32}-VL
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Zmm, Zmm) // AVX512_VPOPCNTDQ{kz|b32}
+ ASMJIT_INST_2x(vpopcntd, Vpopcntd, Zmm, Mem) // AVX512_VPOPCNTDQ{kz|b32}
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Xmm, Xmm) // AVX512_VPOPCNTDQ{kz|b64}-VL
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Xmm, Mem) // AVX512_VPOPCNTDQ{kz|b64}-VL
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Ymm, Ymm) // AVX512_VPOPCNTDQ{kz|b64}-VL
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Ymm, Mem) // AVX512_VPOPCNTDQ{kz|b64}-VL
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Zmm, Zmm) // AVX512_VPOPCNTDQ{kz|b64}
+ ASMJIT_INST_2x(vpopcntq, Vpopcntq, Zmm, Mem) // AVX512_VPOPCNTDQ{kz|b64}
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Xmm, Xmm) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Xmm, Mem) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Ymm, Ymm) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Ymm, Mem) // AVX512_BITALG{kz}-VL
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Zmm, Zmm) // AVX512_BITALG{kz}
+ ASMJIT_INST_2x(vpopcntw, Vpopcntw, Zmm, Mem) // AVX512_BITALG{kz}
+ ASMJIT_INST_3x(vpor, Vpor, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpor, Vpor, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpor, Vpor, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpor, Vpor, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpord, Vpord, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpord, Vpord, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpord, Vpord, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpord, Vpord, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpord, Vpord, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpord, Vpord, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vporq, Vporq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vporq, Vporq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vporq, Vporq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vporq, Vporq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vporq, Vporq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vporq, Vporq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vprold, Vprold, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprold, Vprold, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprold, Vprold, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprold, Vprold, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprold, Vprold, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vprold, Vprold, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vprolq, Vprolq, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprolq, Vprolq, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprolq, Vprolq, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprolq, Vprolq, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprolq, Vprolq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vprolq, Vprolq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vprolvd, Vprolvd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vprolvq, Vprolvq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vprord, Vprord, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprord, Vprord, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprord, Vprord, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprord, Vprord, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vprord, Vprord, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vprord, Vprord, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vprorq, Vprorq, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprorq, Vprorq, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprorq, Vprorq, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprorq, Vprorq, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vprorq, Vprorq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vprorq, Vprorq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vprorvd, Vprorvd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vprorvq, Vprorvq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Xmm, Xmm, Xmm) // AVX AVX512_BW-VL
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Xmm, Xmm, Mem) // AVX AVX512_BW-VL
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW-VL
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Ymm, Ymm, Mem) // AVX2 AVX512_BW-VL
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Zmm, Zmm, Zmm) // AVX512_BW
+ ASMJIT_INST_3x(vpsadbw, Vpsadbw, Zmm, Zmm, Mem) // AVX512_BW
+ ASMJIT_INST_2x(vpscatterdd, Vpscatterdd, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterdd, Vpscatterdd, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterdd, Vpscatterdd, Mem, Zmm) // AVX512_F{k}
+ ASMJIT_INST_2x(vpscatterdq, Vpscatterdq, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterdq, Vpscatterdq, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterdq, Vpscatterdq, Mem, Zmm) // AVX512_F{k}
+ ASMJIT_INST_2x(vpscatterqd, Vpscatterqd, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterqd, Vpscatterqd, Mem, Ymm) // AVX512_F{k}
+ ASMJIT_INST_2x(vpscatterqq, Vpscatterqq, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterqq, Vpscatterqq, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vpscatterqq, Vpscatterqq, Mem, Zmm) // AVX512_F{k}
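+ // Scatter stores address memory through a vector index, and the {k} mask is
+ // mandatory (k0 is not allowed; the mask also tracks completion). A hedged
+ // sketch, assuming an initialized x86::Assembler `a` with AVX512_F:
+ //
+ //   // scatter the qwords of zmm0 to [rdi + zmm1[i]*8] for each lane enabled in k2
+ //   a.k(x86::k2).vpscatterqq(x86::ptr(x86::rdi, x86::zmm1, 3), x86::zmm0);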
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Xmm, Xmm, Xmm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Xmm, Xmm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Ymm, Ymm, Ymm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Ymm, Ymm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Zmm, Zmm, Zmm, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_4i(vpshldd, Vpshldd, Zmm, Zmm, Mem, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvd, Vpshldvd, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvq, Vpshldvq, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshldvw, Vpshldvw, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Xmm, Xmm, Xmm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Xmm, Xmm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Ymm, Ymm, Ymm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Ymm, Ymm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Zmm, Zmm, Zmm, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_4i(vpshrdd, Vpshrdd, Zmm, Zmm, Mem, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvd, Vpshrdvd, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvq, Vpshrdvq, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Xmm, Xmm, Xmm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Xmm, Xmm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Ymm, Ymm, Ymm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Ymm, Ymm, Mem) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Zmm, Zmm, Zmm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshrdvw, Vpshrdvw, Zmm, Zmm, Mem) // AVX512_VBMI2{kz}
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Xmm, Xmm, Xmm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Xmm, Xmm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Ymm, Ymm, Ymm, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Ymm, Ymm, Mem, Imm) // AVX512_VBMI2{kz}-VL
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Zmm, Zmm, Zmm, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_4i(vpshrdw, Vpshrdw, Zmm, Zmm, Mem, Imm) // AVX512_VBMI2{kz}
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpshufb, Vpshufb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Xmm, Xmm) // AVX512_BITALG{k}-VL
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Xmm, Mem) // AVX512_BITALG{k}-VL
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Ymm, Ymm) // AVX512_BITALG{k}-VL
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Ymm, Mem) // AVX512_BITALG{k}-VL
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Zmm, Zmm) // AVX512_BITALG{k}
+ ASMJIT_INST_3x(vpshufbitqmb, Vpshufbitqmb, KReg, Zmm, Mem) // AVX512_BITALG{k}
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Xmm, Mem, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Ymm, Mem, Imm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpshufd, Vpshufd, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Xmm, Mem, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Ymm, Mem, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpshufhw, Vpshufhw, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Xmm, Mem, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Ymm, Mem, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpshuflw, Vpshuflw, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsignb, Vpsignb, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpsignb, Vpsignb, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpsignb, Vpsignb, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpsignb, Vpsignb, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpsignd, Vpsignd, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpsignd, Vpsignd, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpsignd, Vpsignd, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpsignd, Vpsignd, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpsignw, Vpsignw, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpsignw, Vpsignw, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpsignw, Vpsignw, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpsignw, Vpsignw, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3i(vpslld, Vpslld, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpslld, Vpslld, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpslld, Vpslld, Xmm, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpslld, Vpslld, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpslld, Vpslld, Ymm, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpslld, Vpslld, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpslld, Vpslld, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpslld, Vpslld, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpslld, Vpslld, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpslld, Vpslld, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpslld, Vpslld, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpslld, Vpslld, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Xmm, Xmm, Imm) // AVX AVX512_BW-VL
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Ymm, Ymm, Imm) // AVX2 AVX512_BW-VL
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Xmm, Mem, Imm) // AVX512_BW-VL
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Ymm, Mem, Imm) // AVX512_BW-VL
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Zmm, Zmm, Imm) // AVX512_BW
+ ASMJIT_INST_3i(vpslldq, Vpslldq, Zmm, Mem, Imm) // AVX512_BW
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Xmm, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Ymm, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpsllq, Vpsllq, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpsllq, Vpsllq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Xmm, Xmm, Xmm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Xmm, Xmm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsllvd, Vpsllvd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Xmm, Xmm, Xmm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Xmm, Xmm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsllvq, Vpsllvq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsllvw, Vpsllvw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Ymm, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Xmm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Ymm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Zmm, Zmm, Xmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsllw, Vpsllw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsllw, Vpsllw, Zmm, Mem, Imm) // AVX512_BW{kz}
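+ // Each shift exists in two flavors: shift-by-immediate (the ASMJIT_INST_3i
+ // overloads) and shift-all-lanes-by-the-low-64-bits-of-an-Xmm (the
+ // ASMJIT_INST_3x overloads). A short sketch, assuming an x86::Assembler `a`:
+ //
+ //   a.vpslld(x86::xmm0, x86::xmm1, 5);         // every dword shifted left by 5
+ //   a.vpslld(x86::xmm0, x86::xmm1, x86::xmm2); // shifted by the count in xmm2[63:0]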
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Xmm, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Ymm, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpsrad, Vpsrad, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpsrad, Vpsrad, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Xmm, Xmm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Xmm, Xmm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Ymm, Ymm, Xmm) // AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Ymm, Ymm, Mem) // AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpsraq, Vpsraq, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpsraq, Vpsraq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Xmm, Xmm, Xmm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Xmm, Xmm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsravd, Vpsravd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsravq, Vpsravq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsravw, Vpsravw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Ymm, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Xmm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Ymm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Zmm, Zmm, Xmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsraw, Vpsraw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsraw, Vpsraw, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Xmm, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Ymm, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpsrld, Vpsrld, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpsrld, Vpsrld, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Xmm, Xmm, Imm) // AVX AVX512_BW-VL
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Ymm, Ymm, Imm) // AVX2 AVX512_BW-VL
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Xmm, Mem, Imm) // AVX512_BW-VL
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Ymm, Mem, Imm) // AVX512_BW-VL
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Zmm, Zmm, Imm) // AVX512_BW
+ ASMJIT_INST_3i(vpsrldq, Vpsrldq, Zmm, Mem, Imm) // AVX512_BW
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Xmm, Xmm, Mem) // AVX AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Ymm, Ymm, Imm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Ymm, Ymm, Xmm) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz}-VL
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Zmm, Zmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vpsrlq, Vpsrlq, Zmm, Zmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3i(vpsrlq, Vpsrlq, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Xmm, Xmm, Xmm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Xmm, Xmm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsrlvd, Vpsrlvd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Xmm, Xmm, Xmm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Xmm, Xmm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsrlvq, Vpsrlvq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Xmm, Xmm, Xmm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Xmm, Xmm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Ymm, Ymm, Ymm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Ymm, Ymm, Mem) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsrlvw, Vpsrlvw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Xmm, Xmm, Imm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Ymm, Ymm, Imm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Ymm, Ymm, Xmm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Xmm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Ymm, Mem, Imm) // AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Zmm, Zmm, Xmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsrlw, Vpsrlw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Zmm, Zmm, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3i(vpsrlw, Vpsrlw, Zmm, Mem, Imm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubb, Vpsubb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsubd, Vpsubd, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsubq, Vpsubq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubsb, Vpsubsb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubsw, Vpsubsw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubusb, Vpsubusb, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubusw, Vpsubusw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpsubw, Vpsubw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vpternlogd, Vpternlogd, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vpternlogq, Vpternlogq, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b64}
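+ // vpternlog* evaluates an arbitrary three-input boolean function: bit i of
+ // the imm8 is the output for the input combination (dst<<2)|(src1<<1)|src2 == i,
+ // so A|(B&C) sets bits {3,4,5,6,7}, i.e. 0xF8. A sketch assuming an
+ // x86::Assembler `a` with AVX512_F:
+ //
+ //   a.vpternlogd(x86::zmm0, x86::zmm1, x86::zmm2, 0xF8); // zmm0 = zmm0 | (zmm1 & zmm2)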
+ ASMJIT_INST_2x(vptest, Vptest, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vptest, Vptest, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vptest, Vptest, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vptest, Vptest, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestmb, Vptestmb, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Xmm, Xmm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Xmm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Ymm, Ymm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Ymm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Zmm, Zmm) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vptestmd, Vptestmd, KReg, Zmm, Mem) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Xmm, Xmm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Xmm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Ymm, Ymm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Ymm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Zmm, Zmm) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vptestmq, Vptestmq, KReg, Zmm, Mem) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestmw, Vptestmw, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestnmb, Vptestnmb, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Xmm, Xmm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Xmm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Ymm, Ymm) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Ymm, Mem) // AVX512_F{k|b32}-VL
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Zmm, Zmm) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vptestnmd, Vptestnmd, KReg, Zmm, Mem) // AVX512_F{k|b32}
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Xmm, Xmm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Xmm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Ymm, Ymm) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Ymm, Mem) // AVX512_F{k|b64}-VL
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Zmm, Zmm) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vptestnmq, Vptestnmq, KReg, Zmm, Mem) // AVX512_F{k|b64}
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Xmm, Xmm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Xmm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Ymm, Ymm) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Ymm, Mem) // AVX512_BW{k}-VL
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Zmm, Zmm) // AVX512_BW{k}
+ ASMJIT_INST_3x(vptestnmw, Vptestnmw, KReg, Zmm, Mem) // AVX512_BW{k}
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpckhbw, Vpunpckhbw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpunpckhdq, Vpunpckhdq, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpunpckhqdq, Vpunpckhqdq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpckhwd, Vpunpckhwd, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpcklbw, Vpunpcklbw, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpunpckldq, Vpunpckldq, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Ymm, Ymm, Ymm) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Ymm, Ymm, Mem) // AVX2 AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpunpcklqdq, Vpunpcklqdq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Xmm, Xmm, Xmm) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Xmm, Xmm, Mem) // AVX AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Ymm, Ymm, Ymm) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Ymm, Ymm, Mem) // AVX2 AVX512_BW{kz}-VL
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Zmm, Zmm, Zmm) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpunpcklwd, Vpunpcklwd, Zmm, Zmm, Mem) // AVX512_BW{kz}
+ ASMJIT_INST_3x(vpxor, Vpxor, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vpxor, Vpxor, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vpxor, Vpxor, Ymm, Ymm, Ymm) // AVX2
+ ASMJIT_INST_3x(vpxor, Vpxor, Ymm, Ymm, Mem) // AVX2
+ ASMJIT_INST_3x(vpxord, Vpxord, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpxord, Vpxord, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpxord, Vpxord, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpxord, Vpxord, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vpxord, Vpxord, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpxord, Vpxord, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vpxorq, Vpxorq, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
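+ // Note there is no EVEX-encoded plain vpxor: AVX-512 bitwise ops are
+ // element-typed so that masking and broadcast have a defined width (b32 vs
+ // b64). The usual dependency-breaking zero idiom, sketched for an
+ // x86::Assembler `a` with AVX512_F:
+ //
+ //   a.vpxord(x86::zmm0, x86::zmm0, x86::zmm0); // zmm0 = 0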
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Ymm, Ymm, Ymm, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Ymm, Ymm, Mem, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Zmm, Zmm, Zmm, Imm) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_4i(vrangepd, Vrangepd, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Ymm, Ymm, Ymm, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Ymm, Ymm, Mem, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Zmm, Zmm, Zmm, Imm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_4i(vrangeps, Vrangeps, Zmm, Zmm, Mem, Imm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_4i(vrangesd, Vrangesd, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vrangesd, Vrangesd, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vrangess, Vrangess, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vrangess, Vrangess, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_2x(vrcp14pd, Vrcp14pd, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vrcp14ps, Vrcp14ps, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vrcp14sd, Vrcp14sd, Xmm, Xmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrcp14sd, Vrcp14sd, Xmm, Xmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrcp14ss, Vrcp14ss, Xmm, Xmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrcp14ss, Vrcp14ss, Xmm, Xmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vrcp28pd, Vrcp28pd, Zmm, Zmm) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vrcp28pd, Vrcp28pd, Zmm, Mem) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vrcp28ps, Vrcp28ps, Zmm, Zmm) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_2x(vrcp28ps, Vrcp28ps, Zmm, Mem) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_3x(vrcp28sd, Vrcp28sd, Xmm, Xmm, Xmm) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrcp28sd, Vrcp28sd, Xmm, Xmm, Mem) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrcp28ss, Vrcp28ss, Xmm, Xmm, Xmm) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrcp28ss, Vrcp28ss, Xmm, Xmm, Mem) // AVX512_ER{kz|sae}
+ ASMJIT_INST_2x(vrcpps, Vrcpps, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vrcpps, Vrcpps, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vrcpps, Vrcpps, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vrcpps, Vrcpps, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vrcpss, Vrcpss, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vrcpss, Vrcpss, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Xmm, Xmm, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Xmm, Mem, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Ymm, Ymm, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Ymm, Mem, Imm) // AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Zmm, Zmm, Imm) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_3i(vreducepd, Vreducepd, Zmm, Mem, Imm) // AVX512_DQ{kz|sae|b64}
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Xmm, Xmm, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Xmm, Mem, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Ymm, Ymm, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Ymm, Mem, Imm) // AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Zmm, Zmm, Imm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_3i(vreduceps, Vreduceps, Zmm, Mem, Imm) // AVX512_DQ{kz|sae|b32}
+ ASMJIT_INST_4i(vreducesd, Vreducesd, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vreducesd, Vreducesd, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vreducess, Vreducess, Xmm, Xmm, Xmm, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_4i(vreducess, Vreducess, Xmm, Xmm, Mem, Imm) // AVX512_DQ{kz|sae}
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Xmm, Xmm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Xmm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3i(vrndscalepd, Vrndscalepd, Zmm, Mem, Imm) // AVX512_F{kz|sae|b64}
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Xmm, Xmm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Xmm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Zmm, Zmm, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_3i(vrndscaleps, Vrndscaleps, Zmm, Mem, Imm) // AVX512_F{kz|sae|b32}
+ ASMJIT_INST_4i(vrndscalesd, Vrndscalesd, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vrndscalesd, Vrndscalesd, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vrndscaless, Vrndscaless, Xmm, Xmm, Xmm, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_4i(vrndscaless, Vrndscaless, Xmm, Xmm, Mem, Imm) // AVX512_F{kz|sae}
+ ASMJIT_INST_3i(vroundpd, Vroundpd, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_3i(vroundpd, Vroundpd, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_3i(vroundpd, Vroundpd, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_3i(vroundpd, Vroundpd, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_3i(vroundps, Vroundps, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_3i(vroundps, Vroundps, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_3i(vroundps, Vroundps, Ymm, Ymm, Imm) // AVX
+ ASMJIT_INST_3i(vroundps, Vroundps, Ymm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vroundsd, Vroundsd, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vroundsd, Vroundsd, Xmm, Xmm, Mem, Imm) // AVX
+ ASMJIT_INST_4i(vroundss, Vroundss, Xmm, Xmm, Xmm, Imm) // AVX
+ ASMJIT_INST_4i(vroundss, Vroundss, Xmm, Xmm, Mem, Imm) // AVX
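+ // For vround* the imm8 selects the rounding mode: bits 1:0 pick the mode,
+ // bit 2 defers to MXCSR.RC, and bit 3 suppresses precision exceptions; floor
+ // is 0x9 (_MM_FROUND_FLOOR). A sketch assuming an x86::Assembler `a` with AVX:
+ //
+ //   a.vroundpd(x86::ymm0, x86::ymm1, 0x9); // ymm0 = floor(ymm1), #P suppressed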
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_2x(vrsqrt14pd, Vrsqrt14pd, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vrsqrt14ps, Vrsqrt14ps, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vrsqrt14sd, Vrsqrt14sd, Xmm, Xmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrsqrt14sd, Vrsqrt14sd, Xmm, Xmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrsqrt14ss, Vrsqrt14ss, Xmm, Xmm, Xmm) // AVX512_F{kz}
+ ASMJIT_INST_3x(vrsqrt14ss, Vrsqrt14ss, Xmm, Xmm, Mem) // AVX512_F{kz}
+ ASMJIT_INST_2x(vrsqrt28pd, Vrsqrt28pd, Zmm, Zmm) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vrsqrt28pd, Vrsqrt28pd, Zmm, Mem) // AVX512_ER{kz|sae|b64}
+ ASMJIT_INST_2x(vrsqrt28ps, Vrsqrt28ps, Zmm, Zmm) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_2x(vrsqrt28ps, Vrsqrt28ps, Zmm, Mem) // AVX512_ER{kz|sae|b32}
+ ASMJIT_INST_3x(vrsqrt28sd, Vrsqrt28sd, Xmm, Xmm, Xmm) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrsqrt28sd, Vrsqrt28sd, Xmm, Xmm, Mem) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrsqrt28ss, Vrsqrt28ss, Xmm, Xmm, Xmm) // AVX512_ER{kz|sae}
+ ASMJIT_INST_3x(vrsqrt28ss, Vrsqrt28ss, Xmm, Xmm, Mem) // AVX512_ER{kz|sae}
+ ASMJIT_INST_2x(vrsqrtps, Vrsqrtps, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vrsqrtps, Vrsqrtps, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vrsqrtps, Vrsqrtps, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vrsqrtps, Vrsqrtps, Ymm, Mem) // AVX
+ ASMJIT_INST_3x(vrsqrtss, Vrsqrtss, Xmm, Xmm, Xmm) // AVX
+ ASMJIT_INST_3x(vrsqrtss, Vrsqrtss, Xmm, Xmm, Mem) // AVX
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Xmm, Xmm, Xmm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Xmm, Xmm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Ymm, Ymm, Ymm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Ymm, Ymm, Mem) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vscalefpd, Vscalefpd, Zmm, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Xmm, Xmm, Xmm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Xmm, Xmm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Ymm, Ymm, Ymm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Ymm, Ymm, Mem) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vscalefps, Vscalefps, Zmm, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vscalefsd, Vscalefsd, Xmm, Xmm, Xmm) // AVX512_F{kz|er}
+ ASMJIT_INST_3x(vscalefsd, Vscalefsd, Xmm, Xmm, Mem) // AVX512_F{kz|er}
+ ASMJIT_INST_3x(vscalefss, Vscalefss, Xmm, Xmm, Xmm) // AVX512_F{kz|er}
+ ASMJIT_INST_3x(vscalefss, Vscalefss, Xmm, Xmm, Mem) // AVX512_F{kz|er}
+ ASMJIT_INST_2x(vscatterdpd, Vscatterdpd, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterdpd, Vscatterdpd, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterdpd, Vscatterdpd, Mem, Zmm) // AVX512_F{k}
+ ASMJIT_INST_2x(vscatterdps, Vscatterdps, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterdps, Vscatterdps, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterdps, Vscatterdps, Mem, Zmm) // AVX512_F{k}
+ ASMJIT_INST_1x(vscatterpf0dpd, Vscatterpf0dpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf0dps, Vscatterpf0dps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf0qpd, Vscatterpf0qpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf0qps, Vscatterpf0qps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf1dpd, Vscatterpf1dpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf1dps, Vscatterpf1dps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf1qpd, Vscatterpf1qpd, Mem) // AVX512_PF{k}
+ ASMJIT_INST_1x(vscatterpf1qps, Vscatterpf1qps, Mem) // AVX512_PF{k}
+ ASMJIT_INST_2x(vscatterqpd, Vscatterqpd, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterqpd, Vscatterqpd, Mem, Ymm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterqpd, Vscatterqpd, Mem, Zmm) // AVX512_F{k}
+ ASMJIT_INST_2x(vscatterqps, Vscatterqps, Mem, Xmm) // AVX512_F{k}-VL
+ ASMJIT_INST_2x(vscatterqps, Vscatterqps, Mem, Ymm) // AVX512_F{k}
+ ASMJIT_INST_4i(vshuff32x4, Vshuff32x4, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshuff32x4, Vshuff32x4, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshuff32x4, Vshuff32x4, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vshuff32x4, Vshuff32x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vshuff64x2, Vshuff64x2, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshuff64x2, Vshuff64x2, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshuff64x2, Vshuff64x2, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshuff64x2, Vshuff64x2, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshufi32x4, Vshufi32x4, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufi32x4, Vshufi32x4, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufi32x4, Vshufi32x4, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vshufi32x4, Vshufi32x4, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vshufi64x2, Vshufi64x2, Ymm, Ymm, Ymm, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufi64x2, Vshufi64x2, Ymm, Ymm, Mem, Imm) // AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufi64x2, Vshufi64x2, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshufi64x2, Vshufi64x2, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Xmm, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Xmm, Xmm, Mem, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Ymm, Ymm, Ymm, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Ymm, Ymm, Mem, Imm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshufpd, Vshufpd, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b64}
+ ASMJIT_INST_4i(vshufps, Vshufps, Xmm, Xmm, Xmm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufps, Vshufps, Xmm, Xmm, Mem, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufps, Vshufps, Ymm, Ymm, Ymm, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufps, Vshufps, Ymm, Ymm, Mem, Imm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_4i(vshufps, Vshufps, Zmm, Zmm, Zmm, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_4i(vshufps, Vshufps, Zmm, Zmm, Mem, Imm) // AVX512_F{kz|b32}
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vsqrtpd, Vsqrtpd, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_2x(vsqrtps, Vsqrtps, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vsqrtsd, Vsqrtsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsqrtsd, Vsqrtsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsqrtss, Vsqrtss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsqrtss, Vsqrtss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_1x(vstmxcsr, Vstmxcsr, Mem) // AVX
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vsubpd, Vsubpd, Zmm, Zmm, Mem) // AVX512_F{kz|er|b64}
+ ASMJIT_INST_3x(vsubps, Vsubps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vsubps, Vsubps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vsubps, Vsubps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vsubps, Vsubps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vsubps, Vsubps, Zmm, Zmm, Zmm) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vsubps, Vsubps, Zmm, Zmm, Mem) // AVX512_F{kz|er|b32}
+ ASMJIT_INST_3x(vsubsd, Vsubsd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsubsd, Vsubsd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsubss, Vsubss, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_3x(vsubss, Vsubss, Xmm, Xmm, Mem) // AVX AVX512_F{kz|er}
+ ASMJIT_INST_2x(vtestpd, Vtestpd, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vtestpd, Vtestpd, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vtestpd, Vtestpd, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vtestpd, Vtestpd, Ymm, Mem) // AVX
+ ASMJIT_INST_2x(vtestps, Vtestps, Xmm, Xmm) // AVX
+ ASMJIT_INST_2x(vtestps, Vtestps, Xmm, Mem) // AVX
+ ASMJIT_INST_2x(vtestps, Vtestps, Ymm, Ymm) // AVX
+ ASMJIT_INST_2x(vtestps, Vtestps, Ymm, Mem) // AVX
+ ASMJIT_INST_2x(vucomisd, Vucomisd, Xmm, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vucomisd, Vucomisd, Xmm, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vucomiss, Vucomiss, Xmm, Xmm) // AVX AVX512_F{sae}
+ ASMJIT_INST_2x(vucomiss, Vucomiss, Xmm, Mem) // AVX AVX512_F{sae}
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vunpckhpd, Vunpckhpd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vunpckhps, Vunpckhps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b64}-VL
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Zmm, Zmm, Zmm) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vunpcklpd, Vunpcklpd, Zmm, Zmm, Mem) // AVX512_F{kz|b64}
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Xmm, Xmm, Xmm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Xmm, Xmm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Ymm, Ymm, Ymm) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Ymm, Ymm, Mem) // AVX AVX512_F{kz|b32}-VL
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Zmm, Zmm, Zmm) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vunpcklps, Vunpcklps, Zmm, Zmm, Mem) // AVX512_F{kz|b32}
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b64}-VL
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vxorpd, Vxorpd, Zmm, Zmm, Mem) // AVX512_DQ{kz|b64}
+ ASMJIT_INST_3x(vxorps, Vxorps, Xmm, Xmm, Xmm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vxorps, Vxorps, Xmm, Xmm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vxorps, Vxorps, Ymm, Ymm, Ymm) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vxorps, Vxorps, Ymm, Ymm, Mem) // AVX AVX512_DQ{kz|b32}-VL
+ ASMJIT_INST_3x(vxorps, Vxorps, Zmm, Zmm, Zmm) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_3x(vxorps, Vxorps, Zmm, Zmm, Mem) // AVX512_DQ{kz|b32}
+ ASMJIT_INST_0x(vzeroall, Vzeroall) // AVX
+ ASMJIT_INST_0x(vzeroupper, Vzeroupper) // AVX
+
+ //! \}
+
+ //! \name FMA4 Instructions
+ //! \{
+
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddpd, Vfmaddpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddps, Vfmaddps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddsd, Vfmaddsd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsd, Vfmaddsd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsd, Vfmaddsd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddss, Vfmaddss, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddss, Vfmaddss, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddss, Vfmaddss, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubpd, Vfmaddsubpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmaddsubps, Vfmaddsubps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddpd, Vfmsubaddpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubaddps, Vfmsubaddps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubpd, Vfmsubpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfmsubps, Vfmsubps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubsd, Vfmsubsd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubsd, Vfmsubsd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubsd, Vfmsubsd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfmsubss, Vfmsubss, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubss, Vfmsubss, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfmsubss, Vfmsubss, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmaddpd, Vfnmaddpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmaddps, Vfnmaddps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddsd, Vfnmaddsd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddsd, Vfnmaddsd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddsd, Vfnmaddsd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmaddss, Vfnmaddss, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddss, Vfnmaddss, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmaddss, Vfnmaddss, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmsubpd, Vfnmsubpd, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Ymm, Ymm, Ymm, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Ymm, Ymm, Mem, Ymm) // FMA4
+ ASMJIT_INST_4x(vfnmsubps, Vfnmsubps, Ymm, Ymm, Ymm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubsd, Vfnmsubsd, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubsd, Vfnmsubsd, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubsd, Vfnmsubsd, Xmm, Xmm, Xmm, Mem) // FMA4
+ ASMJIT_INST_4x(vfnmsubss, Vfnmsubss, Xmm, Xmm, Xmm, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubss, Vfnmsubss, Xmm, Xmm, Mem, Xmm) // FMA4
+ ASMJIT_INST_4x(vfnmsubss, Vfnmsubss, Xmm, Xmm, Xmm, Mem) // FMA4
+
+ //! \}
+
+ //! \name XOP Instructions (Deprecated)
+ //! \{
+
+ ASMJIT_INST_2x(vfrczpd, Vfrczpd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vfrczpd, Vfrczpd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vfrczpd, Vfrczpd, Ymm, Ymm) // XOP
+ ASMJIT_INST_2x(vfrczpd, Vfrczpd, Ymm, Mem) // XOP
+ ASMJIT_INST_2x(vfrczps, Vfrczps, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vfrczps, Vfrczps, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vfrczps, Vfrczps, Ymm, Ymm) // XOP
+ ASMJIT_INST_2x(vfrczps, Vfrczps, Ymm, Mem) // XOP
+ ASMJIT_INST_2x(vfrczsd, Vfrczsd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vfrczsd, Vfrczsd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vfrczss, Vfrczss, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vfrczss, Vfrczss, Xmm, Mem) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Xmm, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Ymm, Ymm, Ymm, Ymm) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Ymm, Ymm, Mem, Ymm) // XOP
+ ASMJIT_INST_4x(vpcmov, Vpcmov, Ymm, Ymm, Ymm, Mem) // XOP
+ ASMJIT_INST_4i(vpcomb, Vpcomb, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomb, Vpcomb, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomd, Vpcomd, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomd, Vpcomd, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomq, Vpcomq, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomq, Vpcomq, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomw, Vpcomw, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomw, Vpcomw, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomub, Vpcomub, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomub, Vpcomub, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomud, Vpcomud, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomud, Vpcomud, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomuq, Vpcomuq, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomuq, Vpcomuq, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_4i(vpcomuw, Vpcomuw, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_4i(vpcomuw, Vpcomuw, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Xmm, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Xmm, Xmm, Mem, Xmm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Xmm, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Ymm, Ymm, Ymm, Ymm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Ymm, Ymm, Mem, Ymm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2pd, Vpermil2pd, Ymm, Ymm, Ymm, Mem, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Xmm, Xmm, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Xmm, Xmm, Mem, Xmm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Xmm, Xmm, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Ymm, Ymm, Ymm, Ymm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Ymm, Ymm, Mem, Ymm, Imm) // XOP
+ ASMJIT_INST_5i(vpermil2ps, Vpermil2ps, Ymm, Ymm, Ymm, Mem, Imm) // XOP
+ ASMJIT_INST_2x(vphaddbd, Vphaddbd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddbd, Vphaddbd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddbq, Vphaddbq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddbq, Vphaddbq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddbw, Vphaddbw, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddbw, Vphaddbw, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphadddq, Vphadddq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphadddq, Vphadddq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddwd, Vphaddwd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddwd, Vphaddwd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddwq, Vphaddwq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddwq, Vphaddwq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddubd, Vphaddubd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddubd, Vphaddubd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddubq, Vphaddubq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddubq, Vphaddubq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddubw, Vphaddubw, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddubw, Vphaddubw, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphaddudq, Vphaddudq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphaddudq, Vphaddudq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphadduwd, Vphadduwd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphadduwd, Vphadduwd, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphadduwq, Vphadduwq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphadduwq, Vphadduwq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphsubbw, Vphsubbw, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphsubbw, Vphsubbw, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphsubdq, Vphsubdq, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphsubdq, Vphsubdq, Xmm, Mem) // XOP
+ ASMJIT_INST_2x(vphsubwd, Vphsubwd, Xmm, Xmm) // XOP
+ ASMJIT_INST_2x(vphsubwd, Vphsubwd, Xmm, Mem) // XOP
+ ASMJIT_INST_4x(vpmacsdd, Vpmacsdd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsdd, Vpmacsdd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsdqh, Vpmacsdqh, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsdqh, Vpmacsdqh, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsdql, Vpmacsdql, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsdql, Vpmacsdql, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacswd, Vpmacswd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacswd, Vpmacswd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsww, Vpmacsww, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsww, Vpmacsww, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdd, Vpmacssdd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdd, Vpmacssdd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdqh, Vpmacssdqh, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdqh, Vpmacssdqh, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdql, Vpmacssdql, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssdql, Vpmacssdql, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsswd, Vpmacsswd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacsswd, Vpmacsswd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssww, Vpmacssww, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmacssww, Vpmacssww, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmadcsswd, Vpmadcsswd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmadcsswd, Vpmadcsswd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpmadcswd, Vpmadcswd, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpmadcswd, Vpmadcswd, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpperm, Vpperm, Xmm, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_4x(vpperm, Vpperm, Xmm, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_4x(vpperm, Vpperm, Xmm, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vprotb, Vprotb, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vprotb, Vprotb, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vprotb, Vprotb, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3i(vprotb, Vprotb, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_3i(vprotb, Vprotb, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_3x(vprotd, Vprotd, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vprotd, Vprotd, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vprotd, Vprotd, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3i(vprotd, Vprotd, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_3i(vprotd, Vprotd, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_3x(vprotq, Vprotq, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vprotq, Vprotq, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vprotq, Vprotq, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3i(vprotq, Vprotq, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_3i(vprotq, Vprotq, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_3x(vprotw, Vprotw, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vprotw, Vprotw, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vprotw, Vprotw, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3i(vprotw, Vprotw, Xmm, Xmm, Imm) // XOP
+ ASMJIT_INST_3i(vprotw, Vprotw, Xmm, Mem, Imm) // XOP
+ ASMJIT_INST_3x(vpshab, Vpshab, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshab, Vpshab, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshab, Vpshab, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshad, Vpshad, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshad, Vpshad, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshad, Vpshad, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshaq, Vpshaq, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshaq, Vpshaq, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshaq, Vpshaq, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshaw, Vpshaw, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshaw, Vpshaw, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshaw, Vpshaw, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshlb, Vpshlb, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlb, Vpshlb, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlb, Vpshlb, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshld, Vpshld, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshld, Vpshld, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshld, Vpshld, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshlq, Vpshlq, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlq, Vpshlq, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlq, Vpshlq, Xmm, Xmm, Mem) // XOP
+ ASMJIT_INST_3x(vpshlw, Vpshlw, Xmm, Xmm, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlw, Vpshlw, Xmm, Mem, Xmm) // XOP
+ ASMJIT_INST_3x(vpshlw, Vpshlw, Xmm, Xmm, Mem) // XOP
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::x86::EmitterImplicitT]
+// ============================================================================
+
+template<typename This>
+struct EmitterImplicitT : public EmitterExplicitT<This> {
+ //! \name Prefix Options
+ //! \{
+
+ //! Use REP/REPE prefix.
+ inline This& rep() noexcept { return EmitterExplicitT<This>::_addInstOptions(Inst::kOptionRep); }
+ //! Use REP/REPE prefix.
+ inline This& repe() noexcept { return rep(); }
+ //! Use REP/REPE prefix.
+ inline This& repz() noexcept { return rep(); }
+
+ //! Use REPNE prefix.
+ inline This& repne() noexcept { return EmitterExplicitT<This>::_addInstOptions(Inst::kOptionRepne); }
+ //! Use REPNE prefix.
+ inline This& repnz() noexcept { return repne(); }
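+
+ // A minimal usage sketch (illustrative; assuming an `x86::Assembler a`):
+ //
+ //   a.rep().movsb();   // emits 'rep movsb'
+ //   a.repne().scasb(); // emits 'repne scasb'
+ //
+ // The option added by `rep()` / `repne()` applies only to the next
+ // instruction emitted.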
+
+ //! \}
+
+ //! \name Base Instructions & GP Extensions
+ //! \{
+
+ //! \cond
+ using EmitterExplicitT<This>::_emitter;
+
+ // TODO: xrstor and xsave don't have explicit variants yet.
+ using EmitterExplicitT<This>::cbw;
+ using EmitterExplicitT<This>::cdq;
+ using EmitterExplicitT<This>::cdqe;
+ using EmitterExplicitT<This>::clzero;
+ using EmitterExplicitT<This>::cqo;
+ using EmitterExplicitT<This>::cwd;
+ using EmitterExplicitT<This>::cwde;
+ using EmitterExplicitT<This>::cmpsd;
+ using EmitterExplicitT<This>::cmpxchg;
+ using EmitterExplicitT<This>::cmpxchg8b;
+ using EmitterExplicitT<This>::cmpxchg16b;
+ using EmitterExplicitT<This>::cpuid;
+ using EmitterExplicitT<This>::div;
+ using EmitterExplicitT<This>::idiv;
+ using EmitterExplicitT<This>::imul;
+ using EmitterExplicitT<This>::jecxz;
+ using EmitterExplicitT<This>::lahf;
+ using EmitterExplicitT<This>::mulx;
+ using EmitterExplicitT<This>::movsd;
+ using EmitterExplicitT<This>::mul;
+ using EmitterExplicitT<This>::rdmsr;
+ using EmitterExplicitT<This>::rdpmc;
+ using EmitterExplicitT<This>::rdtsc;
+ using EmitterExplicitT<This>::rdtscp;
+ using EmitterExplicitT<This>::sahf;
+ using EmitterExplicitT<This>::wrmsr;
+ using EmitterExplicitT<This>::xgetbv;
+ using EmitterExplicitT<This>::xsetbv;
+ //! \endcond
+
+ ASMJIT_INST_0x(cbw, Cbw) // ANY [IMPLICIT] AX <- Sign Extend AL
+ ASMJIT_INST_0x(cdq, Cdq) // ANY [IMPLICIT] EDX:EAX <- Sign Extend EAX
+ ASMJIT_INST_0x(cdqe, Cdqe) // X64 [IMPLICIT] RAX <- Sign Extend EAX
+ ASMJIT_INST_2x(cmpxchg, Cmpxchg, Gp, Gp) // I486 [IMPLICIT]
+ ASMJIT_INST_2x(cmpxchg, Cmpxchg, Mem, Gp) // I486 [IMPLICIT]
+ ASMJIT_INST_1x(cmpxchg16b, Cmpxchg16b, Mem) // CMPXCHG16B[IMPLICIT] m == RDX:RAX ? m <- RCX:RBX
+ ASMJIT_INST_1x(cmpxchg8b, Cmpxchg8b, Mem) // CMPXCHG8B [IMPLICIT] m == EDX:EAX ? m <- ECX:EBX
+ ASMJIT_INST_0x(cpuid, Cpuid) // I486 [IMPLICIT] EAX:EBX:ECX:EDX <- CPUID[EAX:ECX]
+ ASMJIT_INST_0x(cqo, Cqo) // X64 [IMPLICIT] RDX:RAX <- Sign Extend RAX
+ ASMJIT_INST_0x(cwd, Cwd) // ANY [IMPLICIT] DX:AX <- Sign Extend AX
+ ASMJIT_INST_0x(cwde, Cwde) // ANY [IMPLICIT] EAX <- Sign Extend AX
+ ASMJIT_INST_0x(daa, Daa)
+ ASMJIT_INST_0x(das, Das)
+ ASMJIT_INST_1x(div, Div, Gp) // ANY [IMPLICIT] {AH[Rem]: AL[Quot] <- AX / r8} {xDX[Rem]:xAX[Quot] <- DX:AX / r16|r32|r64}
+ ASMJIT_INST_1x(div, Div, Mem) // ANY [IMPLICIT] {AH[Rem]: AL[Quot] <- AX / m8} {xDX[Rem]:xAX[Quot] <- DX:AX / m16|m32|m64}
+ ASMJIT_INST_1x(idiv, Idiv, Gp) // ANY [IMPLICIT] {AH[Rem]: AL[Quot] <- AX / r8} {xDX[Rem]:xAX[Quot] <- DX:AX / r16|r32|r64}
+ ASMJIT_INST_1x(idiv, Idiv, Mem) // ANY [IMPLICIT] {AH[Rem]: AL[Quot] <- AX / m8} {xDX[Rem]:xAX[Quot] <- DX:AX / m16|m32|m64}
+ ASMJIT_INST_1x(imul, Imul, Gp) // ANY [IMPLICIT] {AX <- AL * r8} {xDX:xAX <- xAX * r16|r32|r64}
+ ASMJIT_INST_1x(imul, Imul, Mem) // ANY [IMPLICIT] {AX <- AL * m8} {xDX:xAX <- xAX * m16|m32|m64}
+ ASMJIT_INST_0x(iret, Iret) // ANY [IMPLICIT]
+ ASMJIT_INST_0x(iretd, Iretd) // ANY [IMPLICIT]
+ ASMJIT_INST_0x(iretq, Iretq) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(iretw, Iretw) // ANY [IMPLICIT]
+ ASMJIT_INST_1x(jecxz, Jecxz, Label) // ANY [IMPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_1x(jecxz, Jecxz, Imm) // ANY [IMPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_1x(jecxz, Jecxz, uint64_t) // ANY [IMPLICIT] Short jump if CX/ECX/RCX is zero.
+ ASMJIT_INST_0x(lahf, Lahf) // LAHFSAHF [IMPLICIT] AH <- EFL
+ ASMJIT_INST_1x(loop, Loop, Label) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_1x(loop, Loop, Imm) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_1x(loop, Loop, uint64_t) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0.
+ ASMJIT_INST_1x(loope, Loope, Label) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_1x(loope, Loope, Imm) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_1x(loope, Loope, uint64_t) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 1.
+ ASMJIT_INST_1x(loopne, Loopne, Label) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_1x(loopne, Loopne, Imm) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_1x(loopne, Loopne, uint64_t) // ANY [IMPLICIT] Decrement xCX; short jump if xCX != 0 && ZF == 0.
+ ASMJIT_INST_1x(mul, Mul, Gp) // ANY [IMPLICIT] {AX <- AL * r8} {xDX:xAX <- xAX * r16|r32|r64}
+ ASMJIT_INST_1x(mul, Mul, Mem) // ANY [IMPLICIT] {AX <- AL * m8} {xDX:xAX <- xAX * m16|m32|m64}
+ ASMJIT_INST_0x(rdmsr, Rdmsr) // ANY [IMPLICIT]
+ ASMJIT_INST_0x(rdpmc, Rdpmc) // ANY [IMPLICIT]
+ ASMJIT_INST_0x(rdtsc, Rdtsc) // RDTSC [IMPLICIT] EDX:EAX <- CNT
+ ASMJIT_INST_0x(rdtscp, Rdtscp) // RDTSCP [IMPLICIT] EDX:EAX:ECX <- CNT
+ ASMJIT_INST_0x(ret, Ret)
+ ASMJIT_INST_1i(ret, Ret, Imm)
+ ASMJIT_INST_0x(sahf, Sahf) // LAHFSAHF [IMPLICIT] EFL <- AH
+ ASMJIT_INST_0x(syscall, Syscall) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(sysenter, Sysenter) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(sysexit, Sysexit) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(sysexit64, Sysexit64) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(sysret, Sysret) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(sysret64, Sysret64) // X64 [IMPLICIT]
+ ASMJIT_INST_0x(wrmsr, Wrmsr) // ANY [IMPLICIT]
+ ASMJIT_INST_0x(xlatb, Xlatb) // ANY [IMPLICIT]
+
+ //! \}
+
+ //! \name String Instruction Aliases
+ //! \{
+
+ inline Error cmpsb() { return _emitter()->emit(Inst::kIdCmps, EmitterExplicitT<This>::ptr_zsi(0, 1), EmitterExplicitT<This>::ptr_zdi(0, 1)); }
+ inline Error cmpsd() { return _emitter()->emit(Inst::kIdCmps, EmitterExplicitT<This>::ptr_zsi(0, 4), EmitterExplicitT<This>::ptr_zdi(0, 4)); }
+ inline Error cmpsq() { return _emitter()->emit(Inst::kIdCmps, EmitterExplicitT<This>::ptr_zsi(0, 8), EmitterExplicitT<This>::ptr_zdi(0, 8)); }
+ inline Error cmpsw() { return _emitter()->emit(Inst::kIdCmps, EmitterExplicitT<This>::ptr_zsi(0, 2), EmitterExplicitT<This>::ptr_zdi(0, 2)); }
+
+ inline Error lodsb() { return _emitter()->emit(Inst::kIdLods, al , EmitterExplicitT<This>::ptr_zdi(0, 1)); }
+ inline Error lodsd() { return _emitter()->emit(Inst::kIdLods, eax, EmitterExplicitT<This>::ptr_zdi(0, 4)); }
+ inline Error lodsq() { return _emitter()->emit(Inst::kIdLods, rax, EmitterExplicitT<This>::ptr_zdi(0, 8)); }
+ inline Error lodsw() { return _emitter()->emit(Inst::kIdLods, ax , EmitterExplicitT<This>::ptr_zdi(0, 2)); }
+
+ inline Error movsb() { return _emitter()->emit(Inst::kIdMovs, EmitterExplicitT<This>::ptr_zdi(0, 1), EmitterExplicitT<This>::ptr_zsi(0, 1)); }
+ inline Error movsd() { return _emitter()->emit(Inst::kIdMovs, EmitterExplicitT<This>::ptr_zdi(0, 4), EmitterExplicitT<This>::ptr_zsi(0, 4)); }
+ inline Error movsq() { return _emitter()->emit(Inst::kIdMovs, EmitterExplicitT<This>::ptr_zdi(0, 8), EmitterExplicitT<This>::ptr_zsi(0, 8)); }
+ inline Error movsw() { return _emitter()->emit(Inst::kIdMovs, EmitterExplicitT<This>::ptr_zdi(0, 2), EmitterExplicitT<This>::ptr_zsi(0, 2)); }
+
+ inline Error scasb() { return _emitter()->emit(Inst::kIdScas, al , EmitterExplicitT<This>::ptr_zdi(0, 1)); }
+ inline Error scasd() { return _emitter()->emit(Inst::kIdScas, eax, EmitterExplicitT<This>::ptr_zdi(0, 4)); }
+ inline Error scasq() { return _emitter()->emit(Inst::kIdScas, rax, EmitterExplicitT<This>::ptr_zdi(0, 8)); }
+ inline Error scasw() { return _emitter()->emit(Inst::kIdScas, ax , EmitterExplicitT<This>::ptr_zdi(0, 2)); }
+
+ inline Error stosb() { return _emitter()->emit(Inst::kIdStos, EmitterExplicitT<This>::ptr_zdi(0, 1), al ); }
+ inline Error stosd() { return _emitter()->emit(Inst::kIdStos, EmitterExplicitT<This>::ptr_zdi(0, 4), eax); }
+ inline Error stosq() { return _emitter()->emit(Inst::kIdStos, EmitterExplicitT<This>::ptr_zdi(0, 8), rax); }
+ inline Error stosw() { return _emitter()->emit(Inst::kIdStos, EmitterExplicitT<This>::ptr_zdi(0, 2), ax ); }
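+
+ // Sketch of how these aliases expand (assuming 64-bit mode): `movsb()`
+ // emits `movs byte ptr [rdi], byte ptr [rsi]` and `stosb()` emits
+ // `stos byte ptr [rdi], al` - the classic `movsb` / `stosb` encodings.
+ // `ptr_zsi` / `ptr_zdi` select SI/ESI/RSI and DI/EDI/RDI to match the
+ // target mode.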
+
+ //! \}
+
+ //! \name CL Instructions
+ //! \{
+
+ ASMJIT_INST_0x(clzero, Clzero) // CLZERO [IMPLICIT]
+
+ //! \}
+
+ //! \name BMI2 Instructions
+ //! \{
+
+ ASMJIT_INST_3x(mulx, Mulx, Gp, Gp, Gp) // BMI2 [IMPLICIT]
+ ASMJIT_INST_3x(mulx, Mulx, Gp, Gp, Mem) // BMI2 [IMPLICIT]
+
+ //! \}
+
+ //! \name FXSR & XSAVE Instructions
+ //! \{
+
+ ASMJIT_INST_0x(xgetbv, Xgetbv) // XSAVE [IMPLICIT] EDX:EAX <- XCR[ECX]
+ ASMJIT_INST_1x(xrstor, Xrstor, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xrstor64, Xrstor64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_1x(xrstors, Xrstors, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xrstors64, Xrstors64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_1x(xsave, Xsave, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xsave64, Xsave64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_1x(xsavec, Xsavec, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xsavec64, Xsavec64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_1x(xsaveopt, Xsaveopt, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xsaveopt64, Xsaveopt64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_1x(xsaves, Xsaves, Mem) // XSAVE [IMPLICIT]
+ ASMJIT_INST_1x(xsaves64, Xsaves64, Mem) // XSAVE+X64 [IMPLICIT]
+ ASMJIT_INST_0x(xsetbv, Xsetbv) // XSAVE [IMPLICIT] XCR[ECX] <- EDX:EAX
+
+ //! \}
+
+ //! \name Monitor & MWait Instructions
+ //! \{
+
+ ASMJIT_INST_0x(monitor, Monitor)
+ ASMJIT_INST_0x(monitorx, Monitorx)
+ ASMJIT_INST_0x(mwait, Mwait)
+ ASMJIT_INST_0x(mwaitx, Mwaitx)
+
+ //! \}
+
+ //! \name MMX & SSE Instructions
+ //! \{
+
+ //! \cond
+ using EmitterExplicitT<This>::blendvpd;
+ using EmitterExplicitT<This>::blendvps;
+ using EmitterExplicitT<This>::maskmovq;
+ using EmitterExplicitT<This>::maskmovdqu;
+ using EmitterExplicitT<This>::pblendvb;
+ using EmitterExplicitT<This>::pcmpestri;
+ using EmitterExplicitT<This>::pcmpestrm;
+ using EmitterExplicitT<This>::pcmpistri;
+ using EmitterExplicitT<This>::pcmpistrm;
+ //! \endcond
+
+ ASMJIT_INST_2x(blendvpd, Blendvpd, Xmm, Xmm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(blendvpd, Blendvpd, Xmm, Mem) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(blendvps, Blendvps, Xmm, Xmm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(blendvps, Blendvps, Xmm, Mem) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(pblendvb, Pblendvb, Xmm, Xmm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(pblendvb, Pblendvb, Xmm, Mem) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_2x(maskmovq, Maskmovq, Mm, Mm) // SSE [IMPLICIT]
+ ASMJIT_INST_2x(maskmovdqu, Maskmovdqu, Xmm, Xmm) // SSE2 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpestri, Pcmpestri, Xmm, Xmm, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpestri, Pcmpestri, Xmm, Mem, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpestrm, Pcmpestrm, Xmm, Xmm, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpestrm, Pcmpestrm, Xmm, Mem, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpistri, Pcmpistri, Xmm, Xmm, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpistri, Pcmpistri, Xmm, Mem, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpistrm, Pcmpistrm, Xmm, Xmm, Imm) // SSE4_1 [IMPLICIT]
+ ASMJIT_INST_3i(pcmpistrm, Pcmpistrm, Xmm, Mem, Imm) // SSE4_1 [IMPLICIT]
+
+ //! \}
+
+ //! \name SHA Instructions
+ //! \{
+
+ using EmitterExplicitT<This>::sha256rnds2;
+
+ ASMJIT_INST_2x(sha256rnds2, Sha256rnds2, Xmm, Xmm) // SHA [IMPLICIT]
+ ASMJIT_INST_2x(sha256rnds2, Sha256rnds2, Xmm, Mem) // SHA [IMPLICIT]
+
+ //! \}
+
+ //! \name AVX, FMA, and AVX512 Instructions
+ //! \{
+
+ using EmitterExplicitT<This>::vmaskmovdqu;
+ using EmitterExplicitT<This>::vpcmpestri;
+ using EmitterExplicitT<This>::vpcmpestrm;
+ using EmitterExplicitT<This>::vpcmpistri;
+ using EmitterExplicitT<This>::vpcmpistrm;
+
+ ASMJIT_INST_2x(vmaskmovdqu, Vmaskmovdqu, Xmm, Xmm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpestri, Vpcmpestri, Xmm, Xmm, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpestri, Vpcmpestri, Xmm, Mem, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpestrm, Vpcmpestrm, Xmm, Xmm, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpestrm, Vpcmpestrm, Xmm, Mem, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpistri, Vpcmpistri, Xmm, Xmm, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpistri, Vpcmpistri, Xmm, Mem, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpistrm, Vpcmpistrm, Xmm, Xmm, Imm) // AVX [IMPLICIT]
+ ASMJIT_INST_3i(vpcmpistrm, Vpcmpistrm, Xmm, Mem, Imm) // AVX [IMPLICIT]
+
+ //! \}
+};
+
+// ============================================================================
+// [asmjit::x86::Emitter]
+// ============================================================================
+
+//! Emitter (X86).
+//!
+//! \note This class cannot be instantiated; you can only cast to it and use
+//! it as an emitter that emits to either `x86::Assembler`, `x86::Builder`, or
+//! `x86::Compiler` (use with caution with `x86::Compiler` as it requires
+//! virtual registers).
+class Emitter : public BaseEmitter, public EmitterImplicitT<Emitter> {
+ ASMJIT_NONCONSTRUCTIBLE(Emitter)
+};
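+
+// A minimal sketch of the cast-only usage described above (assuming the
+// usual asmjit setup with an initialized CodeHolder `code` and the
+// `BaseEmitter::as<>()` helper):
+//
+//   x86::Assembler a(&code);
+//   x86::Emitter* e = a.as<x86::Emitter>();
+//   e->mov(x86::eax, 1);
+//   e->ret();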
+
+//! \}
+
+#undef ASMJIT_INST_0x
+#undef ASMJIT_INST_1x
+#undef ASMJIT_INST_1i
+#undef ASMJIT_INST_1c
+#undef ASMJIT_INST_2x
+#undef ASMJIT_INST_2i
+#undef ASMJIT_INST_2c
+#undef ASMJIT_INST_3x
+#undef ASMJIT_INST_3i
+#undef ASMJIT_INST_3ii
+#undef ASMJIT_INST_4x
+#undef ASMJIT_INST_4i
+#undef ASMJIT_INST_4ii
+#undef ASMJIT_INST_5x
+#undef ASMJIT_INST_5i
+#undef ASMJIT_INST_6x
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86EMITTER_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86features.cpp b/3rdparty/asmjit/src/asmjit/x86/x86features.cpp
new file mode 100644
index 00000000000..6ee5772374b
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86features.cpp
@@ -0,0 +1,393 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#if defined(ASMJIT_BUILD_X86) && ASMJIT_ARCH_X86
+
+#include "../core/cpuinfo.h"
+#include "../core/support.h"
+#include "../x86/x86features.h"
+
+// Required by `__cpuidex()` and `_xgetbv()`.
+#if defined(_MSC_VER)
+ #include <intrin.h>
+#endif
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::Features - Detect]
+// ============================================================================
+
+struct cpuid_t { uint32_t eax, ebx, ecx, edx; };
+struct xgetbv_t { uint32_t eax, edx; };
+
+// Executes `cpuid` instruction.
+static inline void cpuidQuery(cpuid_t* out, uint32_t inEax, uint32_t inEcx = 0) noexcept {
+#if defined(_MSC_VER)
+ __cpuidex(reinterpret_cast<int*>(out), inEax, inEcx);
+#elif defined(__GNUC__) && ASMJIT_ARCH_X86 == 32
+ __asm__ __volatile__(
+ "mov %%ebx, %%edi\n"
+ "cpuid\n"
+ "xchg %%edi, %%ebx\n" : "=a"(out->eax), "=D"(out->ebx), "=c"(out->ecx), "=d"(out->edx) : "a"(inEax), "c"(inEcx));
+#elif defined(__GNUC__) && ASMJIT_ARCH_X86 == 64
+ __asm__ __volatile__(
+ "mov %%rbx, %%rdi\n"
+ "cpuid\n"
+ "xchg %%rdi, %%rbx\n" : "=a"(out->eax), "=D"(out->ebx), "=c"(out->ecx), "=d"(out->edx) : "a"(inEax), "c"(inEcx));
+#else
+ #error "[asmjit] x86::cpuidQuery() - Unsupported compiler."
+#endif
+}
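+
+// Usage sketch: `cpuid_t regs; cpuidQuery(&regs, 0x7);` queries leaf 0x7;
+// a sub-leaf is selected through the optional ECX argument, e.g.
+// `cpuidQuery(&regs, 0x7, 1)` as used by `detectCpu()` below.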
+
+// Executes 'xgetbv' instruction.
+static inline void xgetbvQuery(xgetbv_t* out, uint32_t inEcx) noexcept {
+#if defined(_MSC_VER)
+ uint64_t value = _xgetbv(inEcx);
+ out->eax = uint32_t(value & 0xFFFFFFFFu);
+ out->edx = uint32_t(value >> 32);
+#elif defined(__GNUC__)
+ uint32_t outEax;
+ uint32_t outEdx;
+
+ // The mnemonic form below is replaced by raw bytes because not every
+ // compiler/assembler recognizes `xgetbv`:
+ // __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
+ __asm__ __volatile__(".byte 0x0F, 0x01, 0xD0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
+
+ out->eax = outEax;
+ out->edx = outEdx;
+#else
+ out->eax = 0;
+ out->edx = 0;
+#endif
+}
+
+// Maps the 12-byte vendor string returned by `cpuid` to a normalized vendor
+// name stored in `CpuInfo`.
+static inline void simplifyCpuVendor(CpuInfo& cpu, uint32_t d0, uint32_t d1, uint32_t d2) noexcept {
+ struct Vendor {
+ char normalized[8];
+ union { char text[12]; uint32_t d[3]; };
+ };
+
+ static const Vendor table[] = {
+ { { 'A', 'M', 'D' }, {{ 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' }} },
+ { { 'I', 'N', 'T', 'E', 'L' }, {{ 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' }} },
+ { { 'V', 'I', 'A' }, {{ 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' }} },
+ { { 'V', 'I', 'A' }, {{ 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 }} },
+ { { 'U', 'N', 'K', 'N', 'O', 'W', 'N' }, {{ 0 }} }
+ };
+
+ uint32_t i;
+ for (i = 0; i < ASMJIT_ARRAY_SIZE(table) - 1; i++)
+ if (table[i].d[0] == d0 && table[i].d[1] == d1 && table[i].d[2] == d2)
+ break;
+ memcpy(cpu._vendor.str, table[i].normalized, 8);
+}
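+
+// For example, the "GenuineIntel" vendor string (passed as EBX:EDX:ECX
+// dwords) maps to the normalized "INTEL" entry; vendors not in the table
+// fall through to "UNKNOWN".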
+
+static inline void simplifyCpuBrand(char* s) noexcept {
+ // The string is rewritten in-place; the current character is always
+ // cleared after it's read so the result doesn't contain garbage past the
+ // new null terminator.
+ char* d = s;
+
+ char prev = 0;
+ char curr = s[0];
+ s[0] = '\0';
+
+ for (;;) {
+ if (curr == 0)
+ break;
+
+ if (!(curr == ' ' && (prev == '@' || s[1] == ' ' || s[1] == '@')))
+ *d++ = prev = curr;
+
+ curr = *++s;
+ s[0] = '\0';
+ }
+
+ d[0] = '\0';
+}
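+
+// For example, a hypothetical brand string "Intel(R) Core(TM)  i7  CPU  @  2.80GHz"
+// simplifies to "Intel(R) Core(TM) i7 CPU@2.80GHz" - runs of spaces are
+// collapsed and spaces adjacent to '@' are dropped.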
+
+ASMJIT_FAVOR_SIZE void detectCpu(CpuInfo& cpu) noexcept {
+ using Support::bitTest;
+
+ cpuid_t regs;
+ xgetbv_t xcr0 { 0, 0 };
+ Features& features = cpu._features.as<Features>();
+
+ cpu.reset();
+ cpu._archInfo.init(ArchInfo::kIdHost);
+ cpu._maxLogicalProcessors = 1;
+ features.add(Features::kI486);
+
+ // --------------------------------------------------------------------------
+ // [CPUID EAX=0x0]
+ // --------------------------------------------------------------------------
+
+ // Get vendor string/id.
+ cpuidQuery(&regs, 0x0);
+
+ uint32_t maxId = regs.eax;
+ simplifyCpuVendor(cpu, regs.ebx, regs.edx, regs.ecx);
+
+ // --------------------------------------------------------------------------
+ // [CPUID EAX=0x1]
+ // --------------------------------------------------------------------------
+
+ if (maxId >= 0x1) {
+ // Get feature flags in ECX/EDX and family/model in EAX.
+ cpuidQuery(&regs, 0x1);
+
+ // Fill family and model fields.
+ uint32_t modelId = (regs.eax >> 4) & 0x0F;
+ uint32_t familyId = (regs.eax >> 8) & 0x0F;
+
+ // Use extended family and model fields.
+ if (familyId == 0x06u || familyId == 0x0Fu)
+ modelId += (((regs.eax >> 16) & 0x0Fu) << 4);
+
+ if (familyId == 0x0Fu)
+ familyId += (((regs.eax >> 20) & 0xFFu) << 4);
+
+ cpu._modelId = modelId;
+ cpu._familyId = familyId;
+ cpu._brandId = ((regs.ebx ) & 0xFF);
+ cpu._processorType = ((regs.eax >> 12) & 0x03);
+ cpu._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);
+ cpu._stepping = ((regs.eax ) & 0x0F);
+ cpu._cacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8;
+
+ if (bitTest(regs.ecx, 0)) features.add(Features::kSSE3);
+ if (bitTest(regs.ecx, 1)) features.add(Features::kPCLMULQDQ);
+ if (bitTest(regs.ecx, 3)) features.add(Features::kMONITOR);
+ if (bitTest(regs.ecx, 5)) features.add(Features::kVMX);
+ if (bitTest(regs.ecx, 6)) features.add(Features::kSMX);
+ if (bitTest(regs.ecx, 9)) features.add(Features::kSSSE3);
+ if (bitTest(regs.ecx, 13)) features.add(Features::kCMPXCHG16B);
+ if (bitTest(regs.ecx, 19)) features.add(Features::kSSE4_1);
+ if (bitTest(regs.ecx, 20)) features.add(Features::kSSE4_2);
+ if (bitTest(regs.ecx, 22)) features.add(Features::kMOVBE);
+ if (bitTest(regs.ecx, 23)) features.add(Features::kPOPCNT);
+ if (bitTest(regs.ecx, 25)) features.add(Features::kAESNI);
+ if (bitTest(regs.ecx, 26)) features.add(Features::kXSAVE);
+ if (bitTest(regs.ecx, 27)) features.add(Features::kOSXSAVE);
+ if (bitTest(regs.ecx, 30)) features.add(Features::kRDRAND);
+ if (bitTest(regs.edx, 0)) features.add(Features::kFPU);
+ if (bitTest(regs.edx, 4)) features.add(Features::kRDTSC);
+ if (bitTest(regs.edx, 5)) features.add(Features::kMSR);
+ if (bitTest(regs.edx, 8)) features.add(Features::kCMPXCHG8B);
+ if (bitTest(regs.edx, 15)) features.add(Features::kCMOV);
+ if (bitTest(regs.edx, 19)) features.add(Features::kCLFLUSH);
+ if (bitTest(regs.edx, 23)) features.add(Features::kMMX);
+ if (bitTest(regs.edx, 24)) features.add(Features::kFXSR);
+ if (bitTest(regs.edx, 25)) features.add(Features::kSSE, Features::kMMX2);
+ if (bitTest(regs.edx, 26)) features.add(Features::kSSE, Features::kSSE2);
+ if (bitTest(regs.edx, 28)) features.add(Features::kMT);
+
+ // Get the content of XCR0 if supported by CPU and enabled by OS.
+ if ((regs.ecx & 0x0C000000u) == 0x0C000000u) {
+ xgetbvQuery(&xcr0, 0);
+ }
+
+ // Detect AVX+.
+ if (bitTest(regs.ecx, 28)) {
+ // - XCR0[2:1] == 11b
+ // XMM & YMM states need to be enabled by OS.
+ if ((xcr0.eax & 0x00000006u) == 0x00000006u) {
+ features.add(Features::kAVX);
+
+ if (bitTest(regs.ecx, 12)) features.add(Features::kFMA);
+ if (bitTest(regs.ecx, 29)) features.add(Features::kF16C);
+ }
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [CPUID EAX=0x7]
+ // --------------------------------------------------------------------------
+
+ // Detect new features if the processor supports CPUID-07.
+ bool maybeMPX = false;
+
+ if (maxId >= 0x7) {
+ cpuidQuery(&regs, 0x7);
+ uint32_t maxSubLeafId = regs.eax;
+
+ if (bitTest(regs.ebx, 0)) features.add(Features::kFSGSBASE);
+ if (bitTest(regs.ebx, 3)) features.add(Features::kBMI);
+ if (bitTest(regs.ebx, 4)) features.add(Features::kHLE);
+ if (bitTest(regs.ebx, 7)) features.add(Features::kSMEP);
+ if (bitTest(regs.ebx, 8)) features.add(Features::kBMI2);
+ if (bitTest(regs.ebx, 9)) features.add(Features::kERMS);
+ if (bitTest(regs.ebx, 11)) features.add(Features::kRTM);
+ if (bitTest(regs.ebx, 14)) maybeMPX = true;
+ if (bitTest(regs.ebx, 18)) features.add(Features::kRDSEED);
+ if (bitTest(regs.ebx, 19)) features.add(Features::kADX);
+ if (bitTest(regs.ebx, 20)) features.add(Features::kSMAP);
+ if (bitTest(regs.ebx, 22)) features.add(Features::kPCOMMIT);
+ if (bitTest(regs.ebx, 23)) features.add(Features::kCLFLUSHOPT);
+ if (bitTest(regs.ebx, 24)) features.add(Features::kCLWB);
+ if (bitTest(regs.ebx, 29)) features.add(Features::kSHA);
+ if (bitTest(regs.ecx, 0)) features.add(Features::kPREFETCHWT1);
+ if (bitTest(regs.ecx, 22)) features.add(Features::kRDPID);
+ if (bitTest(regs.ecx, 25)) features.add(Features::kCLDEMOTE);
+ if (bitTest(regs.ecx, 27)) features.add(Features::kMOVDIRI);
+ if (bitTest(regs.ecx, 28)) features.add(Features::kMOVDIR64B);
+ if (bitTest(regs.ecx, 29)) features.add(Features::kENQCMD);
+ if (bitTest(regs.edx, 18)) features.add(Features::kPCONFIG);
+
+ // Detect 'TSX' - Requires at least one of `HLE` and `RTM` features.
+ if (features.hasHLE() || features.hasRTM())
+ features.add(Features::kTSX);
+
+ // Detect 'AVX2' - Requires AVX as well.
+ if (bitTest(regs.ebx, 5) && features.hasAVX())
+ features.add(Features::kAVX2);
+
+ // Detect 'AVX512'.
+ if (bitTest(regs.ebx, 16)) {
+ // - XCR0[2:1] == 11b - XMM/YMM states need to be enabled by OS.
+ // - XCR0[7:5] == 111b - Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by OS.
+ if ((xcr0.eax & 0x000000E6u) == 0x000000E6u) {
+ features.add(Features::kAVX512_F);
+
+ if (bitTest(regs.ebx, 17)) features.add(Features::kAVX512_DQ);
+ if (bitTest(regs.ebx, 21)) features.add(Features::kAVX512_IFMA);
+ if (bitTest(regs.ebx, 26)) features.add(Features::kAVX512_PFI);
+ if (bitTest(regs.ebx, 27)) features.add(Features::kAVX512_ERI);
+ if (bitTest(regs.ebx, 28)) features.add(Features::kAVX512_CDI);
+ if (bitTest(regs.ebx, 30)) features.add(Features::kAVX512_BW);
+ if (bitTest(regs.ebx, 31)) features.add(Features::kAVX512_VL);
+ if (bitTest(regs.ecx, 1)) features.add(Features::kAVX512_VBMI);
+ if (bitTest(regs.ecx, 5)) features.add(Features::kWAITPKG);
+ if (bitTest(regs.ecx, 6)) features.add(Features::kAVX512_VBMI2);
+ if (bitTest(regs.ecx, 8)) features.add(Features::kGFNI);
+ if (bitTest(regs.ecx, 9)) features.add(Features::kVAES);
+ if (bitTest(regs.ecx, 10)) features.add(Features::kVPCLMULQDQ);
+ if (bitTest(regs.ecx, 11)) features.add(Features::kAVX512_VNNI);
+ if (bitTest(regs.ecx, 12)) features.add(Features::kAVX512_BITALG);
+ if (bitTest(regs.ecx, 14)) features.add(Features::kAVX512_VPOPCNTDQ);
+ if (bitTest(regs.edx, 2)) features.add(Features::kAVX512_4VNNIW);
+ if (bitTest(regs.edx, 3)) features.add(Features::kAVX512_4FMAPS);
+ if (bitTest(regs.edx, 8)) features.add(Features::kAVX512_VP2INTERSECT);
+ }
+ }
+
+ if (maxSubLeafId >= 1 && features.hasAVX512_F()) {
+ cpuidQuery(&regs, 0x7, 1);
+
+ if (bitTest(regs.eax, 5)) features.add(Features::kAVX512_BF16);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [CPUID EAX=0xD]
+ // --------------------------------------------------------------------------
+
+ if (maxId >= 0xD) {
+ cpuidQuery(&regs, 0xD, 0);
+
+ // Both the CPUID result and the XCR0 bits have to be set for MPX support.
+ if (((regs.eax & xcr0.eax) & 0x00000018u) == 0x00000018u && maybeMPX)
+ features.add(Features::kMPX);
+
+ cpuidQuery(&regs, 0xD, 1);
+ if (bitTest(regs.eax, 0)) features.add(Features::kXSAVEOPT);
+ if (bitTest(regs.eax, 1)) features.add(Features::kXSAVEC);
+ if (bitTest(regs.eax, 3)) features.add(Features::kXSAVES);
+ }
+
+ // --------------------------------------------------------------------------
+ // [CPUID EAX=0x80000000...maxId]
+ // --------------------------------------------------------------------------
+
+ maxId = 0x80000000u;
+ uint32_t i = maxId;
+
+ // The highest EAX that we understand.
+ uint32_t kHighestProcessedEAX = 0x80000008u;
+
+ // Several CPUID calls are required to get the whole brand string. It's
+ // easier to copy one DWORD at a time than to perform a byte-by-byte copy.
+ uint32_t* brand = cpu._brand.u32;
+ do {
+ cpuidQuery(&regs, i);
+ switch (i) {
+ case 0x80000000u:
+ maxId = Support::min<uint32_t>(regs.eax, kHighestProcessedEAX);
+ break;
+
+ case 0x80000001u:
+ if (bitTest(regs.ecx, 0)) features.add(Features::kLAHFSAHF);
+ if (bitTest(regs.ecx, 2)) features.add(Features::kSVM);
+ if (bitTest(regs.ecx, 5)) features.add(Features::kLZCNT);
+ if (bitTest(regs.ecx, 6)) features.add(Features::kSSE4A);
+ if (bitTest(regs.ecx, 7)) features.add(Features::kMSSE);
+ if (bitTest(regs.ecx, 8)) features.add(Features::kPREFETCHW);
+ if (bitTest(regs.ecx, 12)) features.add(Features::kSKINIT);
+ if (bitTest(regs.ecx, 15)) features.add(Features::kLWP);
+ if (bitTest(regs.ecx, 21)) features.add(Features::kTBM);
+ if (bitTest(regs.ecx, 29)) features.add(Features::kMONITORX);
+ if (bitTest(regs.edx, 20)) features.add(Features::kNX);
+ if (bitTest(regs.edx, 21)) features.add(Features::kFXSROPT);
+ if (bitTest(regs.edx, 22)) features.add(Features::kMMX2);
+ if (bitTest(regs.edx, 27)) features.add(Features::kRDTSCP);
+ if (bitTest(regs.edx, 30)) features.add(Features::k3DNOW2, Features::kMMX2);
+ if (bitTest(regs.edx, 31)) features.add(Features::k3DNOW);
+
+ if (cpu.hasFeature(Features::kAVX)) {
+ if (bitTest(regs.ecx, 11)) features.add(Features::kXOP);
+ if (bitTest(regs.ecx, 16)) features.add(Features::kFMA4);
+ }
+
+ // These seem to be only supported by AMD.
+ if (cpu.isVendor("AMD")) {
+ if (bitTest(regs.ecx, 4)) features.add(Features::kALTMOVCR8);
+ }
+ break;
+
+ case 0x80000002u:
+ case 0x80000003u:
+ case 0x80000004u:
+ *brand++ = regs.eax;
+ *brand++ = regs.ebx;
+ *brand++ = regs.ecx;
+ *brand++ = regs.edx;
+
+ // Go directly to the last one.
+ if (i == 0x80000004u) i = 0x80000008u - 1;
+ break;
+
+ case 0x80000008u:
+ if (bitTest(regs.ebx, 0)) features.add(Features::kCLZERO);
+ break;
+ }
+ } while (++i <= maxId);
+
+ // Simplify CPU brand string a bit by removing some unnecessary spaces.
+ simplifyCpuBrand(cpu._brand.str);
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86 && ASMJIT_ARCH_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86features.h b/3rdparty/asmjit/src/asmjit/x86/x86features.h
new file mode 100644
index 00000000000..d73c063877f
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86features.h
@@ -0,0 +1,286 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86FEATURES_H_INCLUDED
+#define ASMJIT_X86_X86FEATURES_H_INCLUDED
+
+#include "../core/features.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::Features]
+// ============================================================================
+
+//! CPU features (X86).
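+//!
+//! A minimal usage sketch (illustrative only; `CpuInfo::host()` and
+//! `hasFeature()` come from the core `CpuInfo` API used elsewhere in this
+//! patch):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! const CpuInfo& cpu = CpuInfo::host();
+//! if (cpu.hasFeature(x86::Features::kAVX2)) {
+//!   // AVX2 code path can be used.
+//! }
+//! ```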
+class Features : public BaseFeatures {
+public:
+ //! CPU feature ID.
+ enum Id : uint32_t {
+ kNone = 0, //!< No feature (never set, used internally).
+
+ kMT, //!< CPU has multi-threading capabilities.
+ kNX, //!< CPU has Not-Execute-Bit aka DEP (data-execution prevention).
+
+ k3DNOW, //!< CPU has 3DNOW (3DNOW base instructions) [AMD].
+ k3DNOW2, //!< CPU has 3DNOW2 (enhanced 3DNOW) [AMD].
+ kADX, //!< CPU has ADX (multi-precision add-carry instruction extensions).
+ kAESNI, //!< CPU has AESNI (AES encode/decode instructions).
+ kALTMOVCR8, //!< CPU has LOCK MOV R<->CR0 (supports `MOV R<->CR8` via `LOCK MOV R<->CR0` in 32-bit mode) [AMD].
+ kAVX, //!< CPU has AVX (advanced vector extensions).
+ kAVX2, //!< CPU has AVX2 (advanced vector extensions 2).
+    kAVX512_4FMAPS,       //!< CPU has AVX512_4FMAPS (4-iteration FMA, packed single).
+    kAVX512_4VNNIW,       //!< CPU has AVX512_4VNNIW (4-iteration vector neural network instructions, word).
+    kAVX512_BF16,         //!< CPU has AVX512_BF16 (BFLOAT16 conversion and dot-product instructions).
+ kAVX512_BITALG, //!< CPU has AVX512_BITALG (VPOPCNT[B|W], VPSHUFBITQMB).
+ kAVX512_BW, //!< CPU has AVX512_BW (packed BYTE|WORD).
+ kAVX512_CDI, //!< CPU has AVX512_CDI (conflict detection).
+ kAVX512_DQ, //!< CPU has AVX512_DQ (packed DWORD|QWORD).
+ kAVX512_ERI, //!< CPU has AVX512_ERI (exponential and reciprocal).
+ kAVX512_F, //!< CPU has AVX512_F (AVX512 foundation).
+ kAVX512_IFMA, //!< CPU has AVX512_IFMA (integer fused-multiply-add using 52-bit precision).
+ kAVX512_PFI, //!< CPU has AVX512_PFI (prefetch instructions).
+ kAVX512_VBMI, //!< CPU has AVX512_VBMI (vector byte manipulation).
+ kAVX512_VBMI2, //!< CPU has AVX512_VBMI2 (vector byte manipulation 2).
+ kAVX512_VL, //!< CPU has AVX512_VL (vector length extensions).
+ kAVX512_VNNI, //!< CPU has AVX512_VNNI (vector neural network instructions).
+    kAVX512_VP2INTERSECT, //!< CPU has AVX512_VP2INTERSECT (VP2INTERSECT[D|Q] instructions).
+ kAVX512_VPOPCNTDQ, //!< CPU has AVX512_VPOPCNTDQ (VPOPCNT[D|Q] instructions).
+ kBMI, //!< CPU has BMI (bit manipulation instructions #1).
+ kBMI2, //!< CPU has BMI2 (bit manipulation instructions #2).
+ kCLDEMOTE, //!< CPU has CLDEMOTE (cache line demote).
+    kCLFLUSH,             //!< CPU has CLFLUSH (cache line flush).
+    kCLFLUSHOPT,          //!< CPU has CLFLUSHOPT (cache line flush, optimized).
+ kCLWB, //!< CPU has CLWB.
+ kCLZERO, //!< CPU has CLZERO.
+ kCMOV, //!< CPU has CMOV (CMOV and FCMOV instructions).
+ kCMPXCHG16B, //!< CPU has CMPXCHG16B (compare-exchange 16 bytes) [X86_64].
+ kCMPXCHG8B, //!< CPU has CMPXCHG8B (compare-exchange 8 bytes).
+ kENCLV, //!< CPU has ENCLV.
+ kENQCMD, //!< CPU has ENQCMD (enqueue stores).
+ kERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
+ kF16C, //!< CPU has F16C.
+ kFMA, //!< CPU has FMA (fused-multiply-add 3 operand form).
+ kFMA4, //!< CPU has FMA4 (fused-multiply-add 4 operand form).
+ kFPU, //!< CPU has FPU (FPU support).
+ kFSGSBASE, //!< CPU has FSGSBASE.
+ kFXSR, //!< CPU has FXSR (FXSAVE/FXRSTOR instructions).
+    kFXSROPT,             //!< CPU has FXSROPT (FXSAVE/FXRSTOR is optimized).
+ kGEODE, //!< CPU has GEODE extensions (3DNOW additions).
+ kGFNI, //!< CPU has GFNI (Galois field instructions).
+ kHLE, //!< CPU has HLE.
+ kI486, //!< CPU has I486 features (I486+ support).
+ kLAHFSAHF, //!< CPU has LAHF/SAHF (LAHF/SAHF in 64-bit mode) [X86_64].
+ kLWP, //!< CPU has LWP (lightweight profiling) [AMD].
+ kLZCNT, //!< CPU has LZCNT (LZCNT instruction).
+ kMMX, //!< CPU has MMX (MMX base instructions).
+ kMMX2, //!< CPU has MMX2 (MMX extensions or MMX2).
+ kMONITOR, //!< CPU has MONITOR (MONITOR/MWAIT instructions).
+ kMONITORX, //!< CPU has MONITORX (MONITORX/MWAITX instructions).
+ kMOVBE, //!< CPU has MOVBE (move with byte-order swap).
+ kMOVDIR64B, //!< CPU has MOVDIR64B (move 64 bytes as direct store).
+ kMOVDIRI, //!< CPU has MOVDIRI (move dword/qword as direct store).
+ kMPX, //!< CPU has MPX (memory protection extensions).
+ kMSR, //!< CPU has MSR (RDMSR/WRMSR instructions).
+ kMSSE, //!< CPU has MSSE (misaligned SSE support).
+ kOSXSAVE, //!< CPU has OSXSAVE (XSAVE enabled by OS).
+ kPCLMULQDQ, //!< CPU has PCLMULQDQ (packed carry-less multiplication).
+ kPCOMMIT, //!< CPU has PCOMMIT (PCOMMIT instruction).
+ kPCONFIG, //!< CPU has PCONFIG (PCONFIG instruction).
+ kPOPCNT, //!< CPU has POPCNT (POPCNT instruction).
+ kPREFETCHW, //!< CPU has PREFETCHW.
+ kPREFETCHWT1, //!< CPU has PREFETCHWT1.
+ kRDPID, //!< CPU has RDPID.
+ kRDRAND, //!< CPU has RDRAND.
+ kRDSEED, //!< CPU has RDSEED.
+ kRDTSC, //!< CPU has RDTSC.
+ kRDTSCP, //!< CPU has RDTSCP.
+ kRTM, //!< CPU has RTM.
+ kSHA, //!< CPU has SHA (SHA-1 and SHA-256 instructions).
+ kSKINIT, //!< CPU has SKINIT (SKINIT/STGI instructions) [AMD].
+ kSMAP, //!< CPU has SMAP (supervisor-mode access prevention).
+ kSMEP, //!< CPU has SMEP (supervisor-mode execution prevention).
+ kSMX, //!< CPU has SMX (safer mode extensions).
+ kSSE, //!< CPU has SSE.
+ kSSE2, //!< CPU has SSE2.
+ kSSE3, //!< CPU has SSE3.
+ kSSE4_1, //!< CPU has SSE4.1.
+ kSSE4_2, //!< CPU has SSE4.2.
+ kSSE4A, //!< CPU has SSE4A [AMD].
+ kSSSE3, //!< CPU has SSSE3.
+ kSVM, //!< CPU has SVM (virtualization) [AMD].
+ kTBM, //!< CPU has TBM (trailing bit manipulation) [AMD].
+ kTSX, //!< CPU has TSX.
+ kVAES, //!< CPU has VAES (vector AES 256|512 bit support).
+ kVMX, //!< CPU has VMX (virtualization) [INTEL].
+ kVPCLMULQDQ, //!< CPU has VPCLMULQDQ (vector PCLMULQDQ 256|512-bit support).
+ kWAITPKG, //!< CPU has WAITPKG (UMONITOR, UMWAIT, TPAUSE).
+ kWBNOINVD, //!< CPU has WBNOINVD.
+ kXOP, //!< CPU has XOP (XOP instructions) [AMD].
+ kXSAVE, //!< CPU has XSAVE.
+ kXSAVEC, //!< CPU has XSAVEC.
+ kXSAVEOPT, //!< CPU has XSAVEOPT.
+ kXSAVES, //!< CPU has XSAVES.
+
+ kCount //!< Count of X86 CPU features.
+ };
+
+ //! \name Construction / Destruction
+ //! \{
+
+ inline Features() noexcept
+ : BaseFeatures() {}
+ inline Features(const Features& other) noexcept
+ : BaseFeatures(other) {}
+
+ //! \}
+
+ //! \name Overloaded Operators
+ //! \{
+
+ inline Features& operator=(const Features& other) noexcept = default;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ #define ASMJIT_X86_FEATURE(FEATURE) \
+ inline bool has##FEATURE() const noexcept { return has(k##FEATURE); }
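+
+  // For example, `ASMJIT_X86_FEATURE(AVX2)` expands to:
+  //   inline bool hasAVX2() const noexcept { return has(kAVX2); }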
+
+ ASMJIT_X86_FEATURE(MT)
+ ASMJIT_X86_FEATURE(NX)
+
+ ASMJIT_X86_FEATURE(3DNOW)
+ ASMJIT_X86_FEATURE(3DNOW2)
+ ASMJIT_X86_FEATURE(ADX)
+ ASMJIT_X86_FEATURE(AESNI)
+ ASMJIT_X86_FEATURE(ALTMOVCR8)
+ ASMJIT_X86_FEATURE(AVX)
+ ASMJIT_X86_FEATURE(AVX2)
+ ASMJIT_X86_FEATURE(AVX512_4FMAPS)
+ ASMJIT_X86_FEATURE(AVX512_4VNNIW)
+ ASMJIT_X86_FEATURE(AVX512_BF16)
+ ASMJIT_X86_FEATURE(AVX512_BITALG)
+ ASMJIT_X86_FEATURE(AVX512_BW)
+ ASMJIT_X86_FEATURE(AVX512_CDI)
+ ASMJIT_X86_FEATURE(AVX512_DQ)
+ ASMJIT_X86_FEATURE(AVX512_ERI)
+ ASMJIT_X86_FEATURE(AVX512_F)
+ ASMJIT_X86_FEATURE(AVX512_IFMA)
+ ASMJIT_X86_FEATURE(AVX512_PFI)
+ ASMJIT_X86_FEATURE(AVX512_VBMI)
+ ASMJIT_X86_FEATURE(AVX512_VBMI2)
+ ASMJIT_X86_FEATURE(AVX512_VL)
+ ASMJIT_X86_FEATURE(AVX512_VNNI)
+ ASMJIT_X86_FEATURE(AVX512_VP2INTERSECT)
+ ASMJIT_X86_FEATURE(AVX512_VPOPCNTDQ)
+ ASMJIT_X86_FEATURE(BMI)
+ ASMJIT_X86_FEATURE(BMI2)
+ ASMJIT_X86_FEATURE(CLDEMOTE)
+ ASMJIT_X86_FEATURE(CLFLUSH)
+ ASMJIT_X86_FEATURE(CLFLUSHOPT)
+ ASMJIT_X86_FEATURE(CLWB)
+ ASMJIT_X86_FEATURE(CLZERO)
+ ASMJIT_X86_FEATURE(CMOV)
+ ASMJIT_X86_FEATURE(CMPXCHG16B)
+ ASMJIT_X86_FEATURE(CMPXCHG8B)
+ ASMJIT_X86_FEATURE(ENCLV)
+ ASMJIT_X86_FEATURE(ENQCMD)
+ ASMJIT_X86_FEATURE(ERMS)
+ ASMJIT_X86_FEATURE(F16C)
+ ASMJIT_X86_FEATURE(FMA)
+ ASMJIT_X86_FEATURE(FMA4)
+ ASMJIT_X86_FEATURE(FPU)
+ ASMJIT_X86_FEATURE(FSGSBASE)
+ ASMJIT_X86_FEATURE(FXSR)
+ ASMJIT_X86_FEATURE(FXSROPT)
+ ASMJIT_X86_FEATURE(GEODE)
+ ASMJIT_X86_FEATURE(GFNI)
+ ASMJIT_X86_FEATURE(HLE)
+ ASMJIT_X86_FEATURE(I486)
+ ASMJIT_X86_FEATURE(LAHFSAHF)
+ ASMJIT_X86_FEATURE(LWP)
+ ASMJIT_X86_FEATURE(LZCNT)
+ ASMJIT_X86_FEATURE(MMX)
+ ASMJIT_X86_FEATURE(MMX2)
+ ASMJIT_X86_FEATURE(MONITOR)
+ ASMJIT_X86_FEATURE(MONITORX)
+ ASMJIT_X86_FEATURE(MOVBE)
+ ASMJIT_X86_FEATURE(MOVDIR64B)
+ ASMJIT_X86_FEATURE(MOVDIRI)
+ ASMJIT_X86_FEATURE(MPX)
+ ASMJIT_X86_FEATURE(MSR)
+ ASMJIT_X86_FEATURE(MSSE)
+ ASMJIT_X86_FEATURE(OSXSAVE)
+ ASMJIT_X86_FEATURE(PCLMULQDQ)
+ ASMJIT_X86_FEATURE(PCOMMIT)
+ ASMJIT_X86_FEATURE(PCONFIG)
+ ASMJIT_X86_FEATURE(POPCNT)
+ ASMJIT_X86_FEATURE(PREFETCHW)
+ ASMJIT_X86_FEATURE(PREFETCHWT1)
+ ASMJIT_X86_FEATURE(RDPID)
+ ASMJIT_X86_FEATURE(RDRAND)
+ ASMJIT_X86_FEATURE(RDSEED)
+ ASMJIT_X86_FEATURE(RDTSC)
+ ASMJIT_X86_FEATURE(RDTSCP)
+ ASMJIT_X86_FEATURE(RTM)
+ ASMJIT_X86_FEATURE(SHA)
+ ASMJIT_X86_FEATURE(SKINIT)
+ ASMJIT_X86_FEATURE(SMAP)
+ ASMJIT_X86_FEATURE(SMEP)
+ ASMJIT_X86_FEATURE(SMX)
+ ASMJIT_X86_FEATURE(SSE)
+ ASMJIT_X86_FEATURE(SSE2)
+ ASMJIT_X86_FEATURE(SSE3)
+ ASMJIT_X86_FEATURE(SSSE3)
+ ASMJIT_X86_FEATURE(SSE4A)
+ ASMJIT_X86_FEATURE(SSE4_1)
+ ASMJIT_X86_FEATURE(SSE4_2)
+ ASMJIT_X86_FEATURE(SVM)
+ ASMJIT_X86_FEATURE(TBM)
+ ASMJIT_X86_FEATURE(TSX)
+ ASMJIT_X86_FEATURE(XSAVE)
+ ASMJIT_X86_FEATURE(XSAVEC)
+ ASMJIT_X86_FEATURE(XSAVEOPT)
+ ASMJIT_X86_FEATURE(XSAVES)
+ ASMJIT_X86_FEATURE(VAES)
+ ASMJIT_X86_FEATURE(VMX)
+ ASMJIT_X86_FEATURE(VPCLMULQDQ)
+ ASMJIT_X86_FEATURE(WAITPKG)
+ ASMJIT_X86_FEATURE(WBNOINVD)
+ ASMJIT_X86_FEATURE(XOP)
+
+ #undef ASMJIT_X86_FEATURE
+
+ //! \}
+};
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86FEATURES_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86globals.h b/3rdparty/asmjit/src/asmjit/x86/x86globals.h
new file mode 100644
index 00000000000..dca2b95732c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86globals.h
@@ -0,0 +1,2039 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86GLOBALS_H_INCLUDED
+#define ASMJIT_X86_X86GLOBALS_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/inst.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \namespace asmjit::x86
+//! \ingroup asmjit_x86
+//!
+//! X86/X64 API.
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::Inst]
+// ============================================================================
+
+//! Instruction.
+//!
+//! \note Only used to hold x86-specific enumerations and static functions.
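+//!
+//! A minimal usage sketch (illustrative only; `x86::Assembler`, the generic
+//! `emit()` interface, and the `eax`/`ebx` operands are assumed from the rest
+//! of this patch):
+//!
+//! ```
+//! using namespace asmjit;
+//!
+//! // Emits 'add eax, ebx' through the instruction-id based interface.
+//! void emitAdd(x86::Assembler& a) {
+//!   a.emit(x86::Inst::kIdAdd, x86::eax, x86::ebx);
+//! }
+//! ```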
+struct Inst : public BaseInst {
+ //! Instruction id.
+ enum Id : uint32_t {
+ // ${InstId:Begin}
+ kIdNone = 0, //!< Invalid instruction id.
+ kIdAaa, //!< Instruction 'aaa' (X86).
+ kIdAad, //!< Instruction 'aad' (X86).
+ kIdAam, //!< Instruction 'aam' (X86).
+ kIdAas, //!< Instruction 'aas' (X86).
+ kIdAdc, //!< Instruction 'adc'.
+ kIdAdcx, //!< Instruction 'adcx' {ADX}.
+ kIdAdd, //!< Instruction 'add'.
+ kIdAddpd, //!< Instruction 'addpd' {SSE2}.
+ kIdAddps, //!< Instruction 'addps' {SSE}.
+ kIdAddsd, //!< Instruction 'addsd' {SSE2}.
+ kIdAddss, //!< Instruction 'addss' {SSE}.
+ kIdAddsubpd, //!< Instruction 'addsubpd' {SSE3}.
+ kIdAddsubps, //!< Instruction 'addsubps' {SSE3}.
+ kIdAdox, //!< Instruction 'adox' {ADX}.
+ kIdAesdec, //!< Instruction 'aesdec' {AESNI}.
+ kIdAesdeclast, //!< Instruction 'aesdeclast' {AESNI}.
+ kIdAesenc, //!< Instruction 'aesenc' {AESNI}.
+ kIdAesenclast, //!< Instruction 'aesenclast' {AESNI}.
+ kIdAesimc, //!< Instruction 'aesimc' {AESNI}.
+ kIdAeskeygenassist, //!< Instruction 'aeskeygenassist' {AESNI}.
+ kIdAnd, //!< Instruction 'and'.
+ kIdAndn, //!< Instruction 'andn' {BMI}.
+ kIdAndnpd, //!< Instruction 'andnpd' {SSE2}.
+ kIdAndnps, //!< Instruction 'andnps' {SSE}.
+ kIdAndpd, //!< Instruction 'andpd' {SSE2}.
+ kIdAndps, //!< Instruction 'andps' {SSE}.
+ kIdArpl, //!< Instruction 'arpl' (X86).
+ kIdBextr, //!< Instruction 'bextr' {BMI}.
+ kIdBlcfill, //!< Instruction 'blcfill' {TBM}.
+ kIdBlci, //!< Instruction 'blci' {TBM}.
+ kIdBlcic, //!< Instruction 'blcic' {TBM}.
+ kIdBlcmsk, //!< Instruction 'blcmsk' {TBM}.
+ kIdBlcs, //!< Instruction 'blcs' {TBM}.
+ kIdBlendpd, //!< Instruction 'blendpd' {SSE4_1}.
+ kIdBlendps, //!< Instruction 'blendps' {SSE4_1}.
+ kIdBlendvpd, //!< Instruction 'blendvpd' {SSE4_1}.
+ kIdBlendvps, //!< Instruction 'blendvps' {SSE4_1}.
+ kIdBlsfill, //!< Instruction 'blsfill' {TBM}.
+ kIdBlsi, //!< Instruction 'blsi' {BMI}.
+ kIdBlsic, //!< Instruction 'blsic' {TBM}.
+ kIdBlsmsk, //!< Instruction 'blsmsk' {BMI}.
+ kIdBlsr, //!< Instruction 'blsr' {BMI}.
+ kIdBndcl, //!< Instruction 'bndcl' {MPX}.
+ kIdBndcn, //!< Instruction 'bndcn' {MPX}.
+ kIdBndcu, //!< Instruction 'bndcu' {MPX}.
+ kIdBndldx, //!< Instruction 'bndldx' {MPX}.
+ kIdBndmk, //!< Instruction 'bndmk' {MPX}.
+ kIdBndmov, //!< Instruction 'bndmov' {MPX}.
+ kIdBndstx, //!< Instruction 'bndstx' {MPX}.
+ kIdBound, //!< Instruction 'bound' (X86).
+ kIdBsf, //!< Instruction 'bsf'.
+ kIdBsr, //!< Instruction 'bsr'.
+ kIdBswap, //!< Instruction 'bswap'.
+ kIdBt, //!< Instruction 'bt'.
+ kIdBtc, //!< Instruction 'btc'.
+ kIdBtr, //!< Instruction 'btr'.
+ kIdBts, //!< Instruction 'bts'.
+ kIdBzhi, //!< Instruction 'bzhi' {BMI2}.
+ kIdCall, //!< Instruction 'call'.
+ kIdCbw, //!< Instruction 'cbw'.
+ kIdCdq, //!< Instruction 'cdq'.
+ kIdCdqe, //!< Instruction 'cdqe' (X64).
+ kIdClac, //!< Instruction 'clac' {SMAP}.
+ kIdClc, //!< Instruction 'clc'.
+ kIdCld, //!< Instruction 'cld'.
+ kIdCldemote, //!< Instruction 'cldemote' {CLDEMOTE}.
+ kIdClflush, //!< Instruction 'clflush' {CLFLUSH}.
+ kIdClflushopt, //!< Instruction 'clflushopt' {CLFLUSHOPT}.
+ kIdClgi, //!< Instruction 'clgi' {SVM}.
+ kIdCli, //!< Instruction 'cli'.
+ kIdClts, //!< Instruction 'clts'.
+ kIdClwb, //!< Instruction 'clwb' {CLWB}.
+ kIdClzero, //!< Instruction 'clzero' {CLZERO}.
+ kIdCmc, //!< Instruction 'cmc'.
+ kIdCmova, //!< Instruction 'cmova' {CMOV}.
+ kIdCmovae, //!< Instruction 'cmovae' {CMOV}.
+ kIdCmovb, //!< Instruction 'cmovb' {CMOV}.
+ kIdCmovbe, //!< Instruction 'cmovbe' {CMOV}.
+ kIdCmovc, //!< Instruction 'cmovc' {CMOV}.
+ kIdCmove, //!< Instruction 'cmove' {CMOV}.
+ kIdCmovg, //!< Instruction 'cmovg' {CMOV}.
+ kIdCmovge, //!< Instruction 'cmovge' {CMOV}.
+ kIdCmovl, //!< Instruction 'cmovl' {CMOV}.
+ kIdCmovle, //!< Instruction 'cmovle' {CMOV}.
+ kIdCmovna, //!< Instruction 'cmovna' {CMOV}.
+ kIdCmovnae, //!< Instruction 'cmovnae' {CMOV}.
+ kIdCmovnb, //!< Instruction 'cmovnb' {CMOV}.
+ kIdCmovnbe, //!< Instruction 'cmovnbe' {CMOV}.
+ kIdCmovnc, //!< Instruction 'cmovnc' {CMOV}.
+ kIdCmovne, //!< Instruction 'cmovne' {CMOV}.
+ kIdCmovng, //!< Instruction 'cmovng' {CMOV}.
+ kIdCmovnge, //!< Instruction 'cmovnge' {CMOV}.
+ kIdCmovnl, //!< Instruction 'cmovnl' {CMOV}.
+ kIdCmovnle, //!< Instruction 'cmovnle' {CMOV}.
+ kIdCmovno, //!< Instruction 'cmovno' {CMOV}.
+ kIdCmovnp, //!< Instruction 'cmovnp' {CMOV}.
+ kIdCmovns, //!< Instruction 'cmovns' {CMOV}.
+ kIdCmovnz, //!< Instruction 'cmovnz' {CMOV}.
+ kIdCmovo, //!< Instruction 'cmovo' {CMOV}.
+ kIdCmovp, //!< Instruction 'cmovp' {CMOV}.
+ kIdCmovpe, //!< Instruction 'cmovpe' {CMOV}.
+ kIdCmovpo, //!< Instruction 'cmovpo' {CMOV}.
+ kIdCmovs, //!< Instruction 'cmovs' {CMOV}.
+ kIdCmovz, //!< Instruction 'cmovz' {CMOV}.
+ kIdCmp, //!< Instruction 'cmp'.
+ kIdCmppd, //!< Instruction 'cmppd' {SSE2}.
+ kIdCmpps, //!< Instruction 'cmpps' {SSE}.
+ kIdCmps, //!< Instruction 'cmps'.
+ kIdCmpsd, //!< Instruction 'cmpsd' {SSE2}.
+ kIdCmpss, //!< Instruction 'cmpss' {SSE}.
+ kIdCmpxchg, //!< Instruction 'cmpxchg' {I486}.
+ kIdCmpxchg16b, //!< Instruction 'cmpxchg16b' {CMPXCHG16B} (X64).
+ kIdCmpxchg8b, //!< Instruction 'cmpxchg8b' {CMPXCHG8B}.
+ kIdComisd, //!< Instruction 'comisd' {SSE2}.
+ kIdComiss, //!< Instruction 'comiss' {SSE}.
+ kIdCpuid, //!< Instruction 'cpuid' {I486}.
+ kIdCqo, //!< Instruction 'cqo' (X64).
+ kIdCrc32, //!< Instruction 'crc32' {SSE4_2}.
+ kIdCvtdq2pd, //!< Instruction 'cvtdq2pd' {SSE2}.
+ kIdCvtdq2ps, //!< Instruction 'cvtdq2ps' {SSE2}.
+ kIdCvtpd2dq, //!< Instruction 'cvtpd2dq' {SSE2}.
+ kIdCvtpd2pi, //!< Instruction 'cvtpd2pi' {SSE2}.
+ kIdCvtpd2ps, //!< Instruction 'cvtpd2ps' {SSE2}.
+ kIdCvtpi2pd, //!< Instruction 'cvtpi2pd' {SSE2}.
+ kIdCvtpi2ps, //!< Instruction 'cvtpi2ps' {SSE}.
+ kIdCvtps2dq, //!< Instruction 'cvtps2dq' {SSE2}.
+ kIdCvtps2pd, //!< Instruction 'cvtps2pd' {SSE2}.
+ kIdCvtps2pi, //!< Instruction 'cvtps2pi' {SSE}.
+ kIdCvtsd2si, //!< Instruction 'cvtsd2si' {SSE2}.
+ kIdCvtsd2ss, //!< Instruction 'cvtsd2ss' {SSE2}.
+ kIdCvtsi2sd, //!< Instruction 'cvtsi2sd' {SSE2}.
+ kIdCvtsi2ss, //!< Instruction 'cvtsi2ss' {SSE}.
+ kIdCvtss2sd, //!< Instruction 'cvtss2sd' {SSE2}.
+ kIdCvtss2si, //!< Instruction 'cvtss2si' {SSE}.
+ kIdCvttpd2dq, //!< Instruction 'cvttpd2dq' {SSE2}.
+ kIdCvttpd2pi, //!< Instruction 'cvttpd2pi' {SSE2}.
+ kIdCvttps2dq, //!< Instruction 'cvttps2dq' {SSE2}.
+ kIdCvttps2pi, //!< Instruction 'cvttps2pi' {SSE}.
+ kIdCvttsd2si, //!< Instruction 'cvttsd2si' {SSE2}.
+ kIdCvttss2si, //!< Instruction 'cvttss2si' {SSE}.
+ kIdCwd, //!< Instruction 'cwd'.
+ kIdCwde, //!< Instruction 'cwde'.
+ kIdDaa, //!< Instruction 'daa' (X86).
+ kIdDas, //!< Instruction 'das' (X86).
+ kIdDec, //!< Instruction 'dec'.
+ kIdDiv, //!< Instruction 'div'.
+ kIdDivpd, //!< Instruction 'divpd' {SSE2}.
+ kIdDivps, //!< Instruction 'divps' {SSE}.
+ kIdDivsd, //!< Instruction 'divsd' {SSE2}.
+ kIdDivss, //!< Instruction 'divss' {SSE}.
+ kIdDppd, //!< Instruction 'dppd' {SSE4_1}.
+ kIdDpps, //!< Instruction 'dpps' {SSE4_1}.
+ kIdEmms, //!< Instruction 'emms' {MMX}.
+ kIdEnqcmd, //!< Instruction 'enqcmd' {ENQCMD}.
+ kIdEnqcmds, //!< Instruction 'enqcmds' {ENQCMD}.
+ kIdEnter, //!< Instruction 'enter'.
+ kIdExtractps, //!< Instruction 'extractps' {SSE4_1}.
+ kIdExtrq, //!< Instruction 'extrq' {SSE4A}.
+ kIdF2xm1, //!< Instruction 'f2xm1'.
+ kIdFabs, //!< Instruction 'fabs'.
+ kIdFadd, //!< Instruction 'fadd'.
+ kIdFaddp, //!< Instruction 'faddp'.
+ kIdFbld, //!< Instruction 'fbld'.
+ kIdFbstp, //!< Instruction 'fbstp'.
+ kIdFchs, //!< Instruction 'fchs'.
+ kIdFclex, //!< Instruction 'fclex'.
+ kIdFcmovb, //!< Instruction 'fcmovb' {CMOV}.
+ kIdFcmovbe, //!< Instruction 'fcmovbe' {CMOV}.
+ kIdFcmove, //!< Instruction 'fcmove' {CMOV}.
+ kIdFcmovnb, //!< Instruction 'fcmovnb' {CMOV}.
+ kIdFcmovnbe, //!< Instruction 'fcmovnbe' {CMOV}.
+ kIdFcmovne, //!< Instruction 'fcmovne' {CMOV}.
+ kIdFcmovnu, //!< Instruction 'fcmovnu' {CMOV}.
+ kIdFcmovu, //!< Instruction 'fcmovu' {CMOV}.
+ kIdFcom, //!< Instruction 'fcom'.
+ kIdFcomi, //!< Instruction 'fcomi'.
+ kIdFcomip, //!< Instruction 'fcomip'.
+ kIdFcomp, //!< Instruction 'fcomp'.
+ kIdFcompp, //!< Instruction 'fcompp'.
+ kIdFcos, //!< Instruction 'fcos'.
+ kIdFdecstp, //!< Instruction 'fdecstp'.
+ kIdFdiv, //!< Instruction 'fdiv'.
+ kIdFdivp, //!< Instruction 'fdivp'.
+ kIdFdivr, //!< Instruction 'fdivr'.
+ kIdFdivrp, //!< Instruction 'fdivrp'.
+ kIdFemms, //!< Instruction 'femms' {3DNOW}.
+ kIdFfree, //!< Instruction 'ffree'.
+ kIdFiadd, //!< Instruction 'fiadd'.
+ kIdFicom, //!< Instruction 'ficom'.
+ kIdFicomp, //!< Instruction 'ficomp'.
+ kIdFidiv, //!< Instruction 'fidiv'.
+ kIdFidivr, //!< Instruction 'fidivr'.
+ kIdFild, //!< Instruction 'fild'.
+ kIdFimul, //!< Instruction 'fimul'.
+ kIdFincstp, //!< Instruction 'fincstp'.
+ kIdFinit, //!< Instruction 'finit'.
+ kIdFist, //!< Instruction 'fist'.
+ kIdFistp, //!< Instruction 'fistp'.
+ kIdFisttp, //!< Instruction 'fisttp' {SSE3}.
+ kIdFisub, //!< Instruction 'fisub'.
+ kIdFisubr, //!< Instruction 'fisubr'.
+ kIdFld, //!< Instruction 'fld'.
+ kIdFld1, //!< Instruction 'fld1'.
+ kIdFldcw, //!< Instruction 'fldcw'.
+ kIdFldenv, //!< Instruction 'fldenv'.
+ kIdFldl2e, //!< Instruction 'fldl2e'.
+ kIdFldl2t, //!< Instruction 'fldl2t'.
+ kIdFldlg2, //!< Instruction 'fldlg2'.
+ kIdFldln2, //!< Instruction 'fldln2'.
+ kIdFldpi, //!< Instruction 'fldpi'.
+ kIdFldz, //!< Instruction 'fldz'.
+ kIdFmul, //!< Instruction 'fmul'.
+ kIdFmulp, //!< Instruction 'fmulp'.
+ kIdFnclex, //!< Instruction 'fnclex'.
+ kIdFninit, //!< Instruction 'fninit'.
+ kIdFnop, //!< Instruction 'fnop'.
+ kIdFnsave, //!< Instruction 'fnsave'.
+ kIdFnstcw, //!< Instruction 'fnstcw'.
+ kIdFnstenv, //!< Instruction 'fnstenv'.
+ kIdFnstsw, //!< Instruction 'fnstsw'.
+ kIdFpatan, //!< Instruction 'fpatan'.
+ kIdFprem, //!< Instruction 'fprem'.
+ kIdFprem1, //!< Instruction 'fprem1'.
+ kIdFptan, //!< Instruction 'fptan'.
+ kIdFrndint, //!< Instruction 'frndint'.
+ kIdFrstor, //!< Instruction 'frstor'.
+ kIdFsave, //!< Instruction 'fsave'.
+ kIdFscale, //!< Instruction 'fscale'.
+ kIdFsin, //!< Instruction 'fsin'.
+ kIdFsincos, //!< Instruction 'fsincos'.
+ kIdFsqrt, //!< Instruction 'fsqrt'.
+ kIdFst, //!< Instruction 'fst'.
+ kIdFstcw, //!< Instruction 'fstcw'.
+ kIdFstenv, //!< Instruction 'fstenv'.
+ kIdFstp, //!< Instruction 'fstp'.
+ kIdFstsw, //!< Instruction 'fstsw'.
+ kIdFsub, //!< Instruction 'fsub'.
+ kIdFsubp, //!< Instruction 'fsubp'.
+ kIdFsubr, //!< Instruction 'fsubr'.
+ kIdFsubrp, //!< Instruction 'fsubrp'.
+ kIdFtst, //!< Instruction 'ftst'.
+ kIdFucom, //!< Instruction 'fucom'.
+ kIdFucomi, //!< Instruction 'fucomi'.
+ kIdFucomip, //!< Instruction 'fucomip'.
+ kIdFucomp, //!< Instruction 'fucomp'.
+ kIdFucompp, //!< Instruction 'fucompp'.
+ kIdFwait, //!< Instruction 'fwait'.
+ kIdFxam, //!< Instruction 'fxam'.
+ kIdFxch, //!< Instruction 'fxch'.
+ kIdFxrstor, //!< Instruction 'fxrstor' {FXSR}.
+ kIdFxrstor64, //!< Instruction 'fxrstor64' {FXSR} (X64).
+ kIdFxsave, //!< Instruction 'fxsave' {FXSR}.
+ kIdFxsave64, //!< Instruction 'fxsave64' {FXSR} (X64).
+ kIdFxtract, //!< Instruction 'fxtract'.
+ kIdFyl2x, //!< Instruction 'fyl2x'.
+ kIdFyl2xp1, //!< Instruction 'fyl2xp1'.
+ kIdGetsec, //!< Instruction 'getsec' {SMX}.
+ kIdGf2p8affineinvqb, //!< Instruction 'gf2p8affineinvqb' {GFNI}.
+ kIdGf2p8affineqb, //!< Instruction 'gf2p8affineqb' {GFNI}.
+ kIdGf2p8mulb, //!< Instruction 'gf2p8mulb' {GFNI}.
+ kIdHaddpd, //!< Instruction 'haddpd' {SSE3}.
+ kIdHaddps, //!< Instruction 'haddps' {SSE3}.
+ kIdHlt, //!< Instruction 'hlt'.
+ kIdHsubpd, //!< Instruction 'hsubpd' {SSE3}.
+ kIdHsubps, //!< Instruction 'hsubps' {SSE3}.
+ kIdIdiv, //!< Instruction 'idiv'.
+ kIdImul, //!< Instruction 'imul'.
+ kIdIn, //!< Instruction 'in'.
+ kIdInc, //!< Instruction 'inc'.
+ kIdIns, //!< Instruction 'ins'.
+ kIdInsertps, //!< Instruction 'insertps' {SSE4_1}.
+ kIdInsertq, //!< Instruction 'insertq' {SSE4A}.
+ kIdInt, //!< Instruction 'int'.
+ kIdInt3, //!< Instruction 'int3'.
+ kIdInto, //!< Instruction 'into' (X86).
+ kIdInvd, //!< Instruction 'invd' {I486}.
+ kIdInvept, //!< Instruction 'invept' {VMX}.
+ kIdInvlpg, //!< Instruction 'invlpg' {I486}.
+ kIdInvlpga, //!< Instruction 'invlpga' {SVM}.
+ kIdInvpcid, //!< Instruction 'invpcid' {I486}.
+ kIdInvvpid, //!< Instruction 'invvpid' {VMX}.
+ kIdIret, //!< Instruction 'iret'.
+ kIdIretd, //!< Instruction 'iretd'.
+ kIdIretq, //!< Instruction 'iretq' (X64).
+ kIdIretw, //!< Instruction 'iretw'.
+ kIdJa, //!< Instruction 'ja'.
+ kIdJae, //!< Instruction 'jae'.
+ kIdJb, //!< Instruction 'jb'.
+ kIdJbe, //!< Instruction 'jbe'.
+ kIdJc, //!< Instruction 'jc'.
+ kIdJe, //!< Instruction 'je'.
+ kIdJecxz, //!< Instruction 'jecxz'.
+ kIdJg, //!< Instruction 'jg'.
+ kIdJge, //!< Instruction 'jge'.
+ kIdJl, //!< Instruction 'jl'.
+ kIdJle, //!< Instruction 'jle'.
+ kIdJmp, //!< Instruction 'jmp'.
+ kIdJna, //!< Instruction 'jna'.
+ kIdJnae, //!< Instruction 'jnae'.
+ kIdJnb, //!< Instruction 'jnb'.
+ kIdJnbe, //!< Instruction 'jnbe'.
+ kIdJnc, //!< Instruction 'jnc'.
+ kIdJne, //!< Instruction 'jne'.
+ kIdJng, //!< Instruction 'jng'.
+ kIdJnge, //!< Instruction 'jnge'.
+ kIdJnl, //!< Instruction 'jnl'.
+ kIdJnle, //!< Instruction 'jnle'.
+ kIdJno, //!< Instruction 'jno'.
+ kIdJnp, //!< Instruction 'jnp'.
+ kIdJns, //!< Instruction 'jns'.
+ kIdJnz, //!< Instruction 'jnz'.
+ kIdJo, //!< Instruction 'jo'.
+ kIdJp, //!< Instruction 'jp'.
+ kIdJpe, //!< Instruction 'jpe'.
+ kIdJpo, //!< Instruction 'jpo'.
+ kIdJs, //!< Instruction 'js'.
+ kIdJz, //!< Instruction 'jz'.
+ kIdKaddb, //!< Instruction 'kaddb' {AVX512_DQ}.
+ kIdKaddd, //!< Instruction 'kaddd' {AVX512_BW}.
+ kIdKaddq, //!< Instruction 'kaddq' {AVX512_BW}.
+ kIdKaddw, //!< Instruction 'kaddw' {AVX512_DQ}.
+ kIdKandb, //!< Instruction 'kandb' {AVX512_DQ}.
+ kIdKandd, //!< Instruction 'kandd' {AVX512_BW}.
+ kIdKandnb, //!< Instruction 'kandnb' {AVX512_DQ}.
+ kIdKandnd, //!< Instruction 'kandnd' {AVX512_BW}.
+ kIdKandnq, //!< Instruction 'kandnq' {AVX512_BW}.
+ kIdKandnw, //!< Instruction 'kandnw' {AVX512_F}.
+ kIdKandq, //!< Instruction 'kandq' {AVX512_BW}.
+ kIdKandw, //!< Instruction 'kandw' {AVX512_F}.
+ kIdKmovb, //!< Instruction 'kmovb' {AVX512_DQ}.
+ kIdKmovd, //!< Instruction 'kmovd' {AVX512_BW}.
+ kIdKmovq, //!< Instruction 'kmovq' {AVX512_BW}.
+ kIdKmovw, //!< Instruction 'kmovw' {AVX512_F}.
+ kIdKnotb, //!< Instruction 'knotb' {AVX512_DQ}.
+ kIdKnotd, //!< Instruction 'knotd' {AVX512_BW}.
+ kIdKnotq, //!< Instruction 'knotq' {AVX512_BW}.
+ kIdKnotw, //!< Instruction 'knotw' {AVX512_F}.
+ kIdKorb, //!< Instruction 'korb' {AVX512_DQ}.
+ kIdKord, //!< Instruction 'kord' {AVX512_BW}.
+ kIdKorq, //!< Instruction 'korq' {AVX512_BW}.
+ kIdKortestb, //!< Instruction 'kortestb' {AVX512_DQ}.
+ kIdKortestd, //!< Instruction 'kortestd' {AVX512_BW}.
+ kIdKortestq, //!< Instruction 'kortestq' {AVX512_BW}.
+ kIdKortestw, //!< Instruction 'kortestw' {AVX512_F}.
+ kIdKorw, //!< Instruction 'korw' {AVX512_F}.
+ kIdKshiftlb, //!< Instruction 'kshiftlb' {AVX512_DQ}.
+ kIdKshiftld, //!< Instruction 'kshiftld' {AVX512_BW}.
+ kIdKshiftlq, //!< Instruction 'kshiftlq' {AVX512_BW}.
+ kIdKshiftlw, //!< Instruction 'kshiftlw' {AVX512_F}.
+ kIdKshiftrb, //!< Instruction 'kshiftrb' {AVX512_DQ}.
+ kIdKshiftrd, //!< Instruction 'kshiftrd' {AVX512_BW}.
+ kIdKshiftrq, //!< Instruction 'kshiftrq' {AVX512_BW}.
+ kIdKshiftrw, //!< Instruction 'kshiftrw' {AVX512_F}.
+ kIdKtestb, //!< Instruction 'ktestb' {AVX512_DQ}.
+ kIdKtestd, //!< Instruction 'ktestd' {AVX512_BW}.
+ kIdKtestq, //!< Instruction 'ktestq' {AVX512_BW}.
+ kIdKtestw, //!< Instruction 'ktestw' {AVX512_DQ}.
+ kIdKunpckbw, //!< Instruction 'kunpckbw' {AVX512_F}.
+ kIdKunpckdq, //!< Instruction 'kunpckdq' {AVX512_BW}.
+ kIdKunpckwd, //!< Instruction 'kunpckwd' {AVX512_BW}.
+ kIdKxnorb, //!< Instruction 'kxnorb' {AVX512_DQ}.
+ kIdKxnord, //!< Instruction 'kxnord' {AVX512_BW}.
+ kIdKxnorq, //!< Instruction 'kxnorq' {AVX512_BW}.
+ kIdKxnorw, //!< Instruction 'kxnorw' {AVX512_F}.
+ kIdKxorb, //!< Instruction 'kxorb' {AVX512_DQ}.
+ kIdKxord, //!< Instruction 'kxord' {AVX512_BW}.
+ kIdKxorq, //!< Instruction 'kxorq' {AVX512_BW}.
+ kIdKxorw, //!< Instruction 'kxorw' {AVX512_F}.
+ kIdLahf, //!< Instruction 'lahf' {LAHFSAHF}.
+ kIdLar, //!< Instruction 'lar'.
+ kIdLddqu, //!< Instruction 'lddqu' {SSE3}.
+ kIdLdmxcsr, //!< Instruction 'ldmxcsr' {SSE}.
+ kIdLds, //!< Instruction 'lds' (X86).
+ kIdLea, //!< Instruction 'lea'.
+ kIdLeave, //!< Instruction 'leave'.
+ kIdLes, //!< Instruction 'les' (X86).
+ kIdLfence, //!< Instruction 'lfence' {SSE2}.
+ kIdLfs, //!< Instruction 'lfs'.
+ kIdLgdt, //!< Instruction 'lgdt'.
+ kIdLgs, //!< Instruction 'lgs'.
+ kIdLidt, //!< Instruction 'lidt'.
+ kIdLldt, //!< Instruction 'lldt'.
+ kIdLlwpcb, //!< Instruction 'llwpcb' {LWP}.
+ kIdLmsw, //!< Instruction 'lmsw'.
+ kIdLods, //!< Instruction 'lods'.
+ kIdLoop, //!< Instruction 'loop'.
+ kIdLoope, //!< Instruction 'loope'.
+ kIdLoopne, //!< Instruction 'loopne'.
+ kIdLsl, //!< Instruction 'lsl'.
+ kIdLss, //!< Instruction 'lss'.
+ kIdLtr, //!< Instruction 'ltr'.
+ kIdLwpins, //!< Instruction 'lwpins' {LWP}.
+ kIdLwpval, //!< Instruction 'lwpval' {LWP}.
+ kIdLzcnt, //!< Instruction 'lzcnt' {LZCNT}.
+ kIdMaskmovdqu, //!< Instruction 'maskmovdqu' {SSE2}.
+ kIdMaskmovq, //!< Instruction 'maskmovq' {MMX2}.
+ kIdMaxpd, //!< Instruction 'maxpd' {SSE2}.
+ kIdMaxps, //!< Instruction 'maxps' {SSE}.
+ kIdMaxsd, //!< Instruction 'maxsd' {SSE2}.
+ kIdMaxss, //!< Instruction 'maxss' {SSE}.
+ kIdMfence, //!< Instruction 'mfence' {SSE2}.
+ kIdMinpd, //!< Instruction 'minpd' {SSE2}.
+ kIdMinps, //!< Instruction 'minps' {SSE}.
+ kIdMinsd, //!< Instruction 'minsd' {SSE2}.
+ kIdMinss, //!< Instruction 'minss' {SSE}.
+ kIdMonitor, //!< Instruction 'monitor' {MONITOR}.
+ kIdMonitorx, //!< Instruction 'monitorx' {MONITORX}.
+ kIdMov, //!< Instruction 'mov'.
+ kIdMovapd, //!< Instruction 'movapd' {SSE2}.
+ kIdMovaps, //!< Instruction 'movaps' {SSE}.
+ kIdMovbe, //!< Instruction 'movbe' {MOVBE}.
+ kIdMovd, //!< Instruction 'movd' {MMX|SSE2}.
+ kIdMovddup, //!< Instruction 'movddup' {SSE3}.
+ kIdMovdir64b, //!< Instruction 'movdir64b' {MOVDIR64B}.
+ kIdMovdiri, //!< Instruction 'movdiri' {MOVDIRI}.
+ kIdMovdq2q, //!< Instruction 'movdq2q' {SSE2}.
+ kIdMovdqa, //!< Instruction 'movdqa' {SSE2}.
+ kIdMovdqu, //!< Instruction 'movdqu' {SSE2}.
+ kIdMovhlps, //!< Instruction 'movhlps' {SSE}.
+ kIdMovhpd, //!< Instruction 'movhpd' {SSE2}.
+ kIdMovhps, //!< Instruction 'movhps' {SSE}.
+ kIdMovlhps, //!< Instruction 'movlhps' {SSE}.
+ kIdMovlpd, //!< Instruction 'movlpd' {SSE2}.
+ kIdMovlps, //!< Instruction 'movlps' {SSE}.
+ kIdMovmskpd, //!< Instruction 'movmskpd' {SSE2}.
+ kIdMovmskps, //!< Instruction 'movmskps' {SSE}.
+ kIdMovntdq, //!< Instruction 'movntdq' {SSE2}.
+ kIdMovntdqa, //!< Instruction 'movntdqa' {SSE4_1}.
+ kIdMovnti, //!< Instruction 'movnti' {SSE2}.
+ kIdMovntpd, //!< Instruction 'movntpd' {SSE2}.
+ kIdMovntps, //!< Instruction 'movntps' {SSE}.
+ kIdMovntq, //!< Instruction 'movntq' {MMX2}.
+ kIdMovntsd, //!< Instruction 'movntsd' {SSE4A}.
+ kIdMovntss, //!< Instruction 'movntss' {SSE4A}.
+ kIdMovq, //!< Instruction 'movq' {MMX|SSE2}.
+ kIdMovq2dq, //!< Instruction 'movq2dq' {SSE2}.
+ kIdMovs, //!< Instruction 'movs'.
+ kIdMovsd, //!< Instruction 'movsd' {SSE2}.
+ kIdMovshdup, //!< Instruction 'movshdup' {SSE3}.
+ kIdMovsldup, //!< Instruction 'movsldup' {SSE3}.
+ kIdMovss, //!< Instruction 'movss' {SSE}.
+ kIdMovsx, //!< Instruction 'movsx'.
+ kIdMovsxd, //!< Instruction 'movsxd' (X64).
+ kIdMovupd, //!< Instruction 'movupd' {SSE2}.
+ kIdMovups, //!< Instruction 'movups' {SSE}.
+ kIdMovzx, //!< Instruction 'movzx'.
+ kIdMpsadbw, //!< Instruction 'mpsadbw' {SSE4_1}.
+ kIdMul, //!< Instruction 'mul'.
+ kIdMulpd, //!< Instruction 'mulpd' {SSE2}.
+ kIdMulps, //!< Instruction 'mulps' {SSE}.
+ kIdMulsd, //!< Instruction 'mulsd' {SSE2}.
+ kIdMulss, //!< Instruction 'mulss' {SSE}.
+ kIdMulx, //!< Instruction 'mulx' {BMI2}.
+ kIdMwait, //!< Instruction 'mwait' {MONITOR}.
+ kIdMwaitx, //!< Instruction 'mwaitx' {MONITORX}.
+ kIdNeg, //!< Instruction 'neg'.
+ kIdNop, //!< Instruction 'nop'.
+ kIdNot, //!< Instruction 'not'.
+ kIdOr, //!< Instruction 'or'.
+ kIdOrpd, //!< Instruction 'orpd' {SSE2}.
+ kIdOrps, //!< Instruction 'orps' {SSE}.
+ kIdOut, //!< Instruction 'out'.
+ kIdOuts, //!< Instruction 'outs'.
+ kIdPabsb, //!< Instruction 'pabsb' {SSSE3}.
+ kIdPabsd, //!< Instruction 'pabsd' {SSSE3}.
+ kIdPabsw, //!< Instruction 'pabsw' {SSSE3}.
+ kIdPackssdw, //!< Instruction 'packssdw' {MMX|SSE2}.
+ kIdPacksswb, //!< Instruction 'packsswb' {MMX|SSE2}.
+ kIdPackusdw, //!< Instruction 'packusdw' {SSE4_1}.
+ kIdPackuswb, //!< Instruction 'packuswb' {MMX|SSE2}.
+ kIdPaddb, //!< Instruction 'paddb' {MMX|SSE2}.
+ kIdPaddd, //!< Instruction 'paddd' {MMX|SSE2}.
+ kIdPaddq, //!< Instruction 'paddq' {SSE2}.
+ kIdPaddsb, //!< Instruction 'paddsb' {MMX|SSE2}.
+ kIdPaddsw, //!< Instruction 'paddsw' {MMX|SSE2}.
+ kIdPaddusb, //!< Instruction 'paddusb' {MMX|SSE2}.
+ kIdPaddusw, //!< Instruction 'paddusw' {MMX|SSE2}.
+ kIdPaddw, //!< Instruction 'paddw' {MMX|SSE2}.
+    kIdPalignr,          //!< Instruction 'palignr' {SSSE3}.
+ kIdPand, //!< Instruction 'pand' {MMX|SSE2}.
+ kIdPandn, //!< Instruction 'pandn' {MMX|SSE2}.
+ kIdPause, //!< Instruction 'pause'.
+ kIdPavgb, //!< Instruction 'pavgb' {MMX2|SSE2}.
+ kIdPavgusb, //!< Instruction 'pavgusb' {3DNOW}.
+ kIdPavgw, //!< Instruction 'pavgw' {MMX2|SSE2}.
+ kIdPblendvb, //!< Instruction 'pblendvb' {SSE4_1}.
+ kIdPblendw, //!< Instruction 'pblendw' {SSE4_1}.
+ kIdPclmulqdq, //!< Instruction 'pclmulqdq' {PCLMULQDQ}.
+ kIdPcmpeqb, //!< Instruction 'pcmpeqb' {MMX|SSE2}.
+ kIdPcmpeqd, //!< Instruction 'pcmpeqd' {MMX|SSE2}.
+ kIdPcmpeqq, //!< Instruction 'pcmpeqq' {SSE4_1}.
+ kIdPcmpeqw, //!< Instruction 'pcmpeqw' {MMX|SSE2}.
+ kIdPcmpestri, //!< Instruction 'pcmpestri' {SSE4_2}.
+ kIdPcmpestrm, //!< Instruction 'pcmpestrm' {SSE4_2}.
+ kIdPcmpgtb, //!< Instruction 'pcmpgtb' {MMX|SSE2}.
+ kIdPcmpgtd, //!< Instruction 'pcmpgtd' {MMX|SSE2}.
+ kIdPcmpgtq, //!< Instruction 'pcmpgtq' {SSE4_2}.
+ kIdPcmpgtw, //!< Instruction 'pcmpgtw' {MMX|SSE2}.
+ kIdPcmpistri, //!< Instruction 'pcmpistri' {SSE4_2}.
+ kIdPcmpistrm, //!< Instruction 'pcmpistrm' {SSE4_2}.
+ kIdPcommit, //!< Instruction 'pcommit' {PCOMMIT}.
+ kIdPdep, //!< Instruction 'pdep' {BMI2}.
+ kIdPext, //!< Instruction 'pext' {BMI2}.
+ kIdPextrb, //!< Instruction 'pextrb' {SSE4_1}.
+ kIdPextrd, //!< Instruction 'pextrd' {SSE4_1}.
+ kIdPextrq, //!< Instruction 'pextrq' {SSE4_1} (X64).
+ kIdPextrw, //!< Instruction 'pextrw' {MMX2|SSE2|SSE4_1}.
+ kIdPf2id, //!< Instruction 'pf2id' {3DNOW}.
+ kIdPf2iw, //!< Instruction 'pf2iw' {3DNOW2}.
+ kIdPfacc, //!< Instruction 'pfacc' {3DNOW}.
+ kIdPfadd, //!< Instruction 'pfadd' {3DNOW}.
+ kIdPfcmpeq, //!< Instruction 'pfcmpeq' {3DNOW}.
+ kIdPfcmpge, //!< Instruction 'pfcmpge' {3DNOW}.
+ kIdPfcmpgt, //!< Instruction 'pfcmpgt' {3DNOW}.
+ kIdPfmax, //!< Instruction 'pfmax' {3DNOW}.
+ kIdPfmin, //!< Instruction 'pfmin' {3DNOW}.
+ kIdPfmul, //!< Instruction 'pfmul' {3DNOW}.
+ kIdPfnacc, //!< Instruction 'pfnacc' {3DNOW2}.
+ kIdPfpnacc, //!< Instruction 'pfpnacc' {3DNOW2}.
+ kIdPfrcp, //!< Instruction 'pfrcp' {3DNOW}.
+ kIdPfrcpit1, //!< Instruction 'pfrcpit1' {3DNOW}.
+ kIdPfrcpit2, //!< Instruction 'pfrcpit2' {3DNOW}.
+ kIdPfrcpv, //!< Instruction 'pfrcpv' {GEODE}.
+ kIdPfrsqit1, //!< Instruction 'pfrsqit1' {3DNOW}.
+ kIdPfrsqrt, //!< Instruction 'pfrsqrt' {3DNOW}.
+ kIdPfrsqrtv, //!< Instruction 'pfrsqrtv' {GEODE}.
+ kIdPfsub, //!< Instruction 'pfsub' {3DNOW}.
+ kIdPfsubr, //!< Instruction 'pfsubr' {3DNOW}.
+ kIdPhaddd, //!< Instruction 'phaddd' {SSSE3}.
+ kIdPhaddsw, //!< Instruction 'phaddsw' {SSSE3}.
+ kIdPhaddw, //!< Instruction 'phaddw' {SSSE3}.
+ kIdPhminposuw, //!< Instruction 'phminposuw' {SSE4_1}.
+ kIdPhsubd, //!< Instruction 'phsubd' {SSSE3}.
+ kIdPhsubsw, //!< Instruction 'phsubsw' {SSSE3}.
+ kIdPhsubw, //!< Instruction 'phsubw' {SSSE3}.
+ kIdPi2fd, //!< Instruction 'pi2fd' {3DNOW}.
+ kIdPi2fw, //!< Instruction 'pi2fw' {3DNOW2}.
+ kIdPinsrb, //!< Instruction 'pinsrb' {SSE4_1}.
+ kIdPinsrd, //!< Instruction 'pinsrd' {SSE4_1}.
+ kIdPinsrq, //!< Instruction 'pinsrq' {SSE4_1} (X64).
+ kIdPinsrw, //!< Instruction 'pinsrw' {MMX2|SSE2}.
+ kIdPmaddubsw, //!< Instruction 'pmaddubsw' {SSSE3}.
+ kIdPmaddwd, //!< Instruction 'pmaddwd' {MMX|SSE2}.
+ kIdPmaxsb, //!< Instruction 'pmaxsb' {SSE4_1}.
+ kIdPmaxsd, //!< Instruction 'pmaxsd' {SSE4_1}.
+ kIdPmaxsw, //!< Instruction 'pmaxsw' {MMX2|SSE2}.
+ kIdPmaxub, //!< Instruction 'pmaxub' {MMX2|SSE2}.
+ kIdPmaxud, //!< Instruction 'pmaxud' {SSE4_1}.
+ kIdPmaxuw, //!< Instruction 'pmaxuw' {SSE4_1}.
+ kIdPminsb, //!< Instruction 'pminsb' {SSE4_1}.
+ kIdPminsd, //!< Instruction 'pminsd' {SSE4_1}.
+ kIdPminsw, //!< Instruction 'pminsw' {MMX2|SSE2}.
+ kIdPminub, //!< Instruction 'pminub' {MMX2|SSE2}.
+ kIdPminud, //!< Instruction 'pminud' {SSE4_1}.
+ kIdPminuw, //!< Instruction 'pminuw' {SSE4_1}.
+ kIdPmovmskb, //!< Instruction 'pmovmskb' {MMX2|SSE2}.
+ kIdPmovsxbd, //!< Instruction 'pmovsxbd' {SSE4_1}.
+ kIdPmovsxbq, //!< Instruction 'pmovsxbq' {SSE4_1}.
+ kIdPmovsxbw, //!< Instruction 'pmovsxbw' {SSE4_1}.
+ kIdPmovsxdq, //!< Instruction 'pmovsxdq' {SSE4_1}.
+ kIdPmovsxwd, //!< Instruction 'pmovsxwd' {SSE4_1}.
+ kIdPmovsxwq, //!< Instruction 'pmovsxwq' {SSE4_1}.
+ kIdPmovzxbd, //!< Instruction 'pmovzxbd' {SSE4_1}.
+ kIdPmovzxbq, //!< Instruction 'pmovzxbq' {SSE4_1}.
+ kIdPmovzxbw, //!< Instruction 'pmovzxbw' {SSE4_1}.
+ kIdPmovzxdq, //!< Instruction 'pmovzxdq' {SSE4_1}.
+ kIdPmovzxwd, //!< Instruction 'pmovzxwd' {SSE4_1}.
+ kIdPmovzxwq, //!< Instruction 'pmovzxwq' {SSE4_1}.
+ kIdPmuldq, //!< Instruction 'pmuldq' {SSE4_1}.
+ kIdPmulhrsw, //!< Instruction 'pmulhrsw' {SSSE3}.
+ kIdPmulhrw, //!< Instruction 'pmulhrw' {3DNOW}.
+ kIdPmulhuw, //!< Instruction 'pmulhuw' {MMX2|SSE2}.
+ kIdPmulhw, //!< Instruction 'pmulhw' {MMX|SSE2}.
+ kIdPmulld, //!< Instruction 'pmulld' {SSE4_1}.
+ kIdPmullw, //!< Instruction 'pmullw' {MMX|SSE2}.
+ kIdPmuludq, //!< Instruction 'pmuludq' {SSE2}.
+ kIdPop, //!< Instruction 'pop'.
+ kIdPopa, //!< Instruction 'popa' (X86).
+ kIdPopad, //!< Instruction 'popad' (X86).
+ kIdPopcnt, //!< Instruction 'popcnt' {POPCNT}.
+ kIdPopf, //!< Instruction 'popf'.
+ kIdPopfd, //!< Instruction 'popfd' (X86).
+ kIdPopfq, //!< Instruction 'popfq' (X64).
+ kIdPor, //!< Instruction 'por' {MMX|SSE2}.
+ kIdPrefetch, //!< Instruction 'prefetch' {3DNOW}.
+ kIdPrefetchnta, //!< Instruction 'prefetchnta' {MMX2}.
+ kIdPrefetcht0, //!< Instruction 'prefetcht0' {MMX2}.
+ kIdPrefetcht1, //!< Instruction 'prefetcht1' {MMX2}.
+ kIdPrefetcht2, //!< Instruction 'prefetcht2' {MMX2}.
+ kIdPrefetchw, //!< Instruction 'prefetchw' {PREFETCHW}.
+ kIdPrefetchwt1, //!< Instruction 'prefetchwt1' {PREFETCHWT1}.
+ kIdPsadbw, //!< Instruction 'psadbw' {MMX2|SSE2}.
+ kIdPshufb, //!< Instruction 'pshufb' {SSSE3}.
+ kIdPshufd, //!< Instruction 'pshufd' {SSE2}.
+ kIdPshufhw, //!< Instruction 'pshufhw' {SSE2}.
+ kIdPshuflw, //!< Instruction 'pshuflw' {SSE2}.
+ kIdPshufw, //!< Instruction 'pshufw' {MMX2}.
+ kIdPsignb, //!< Instruction 'psignb' {SSSE3}.
+ kIdPsignd, //!< Instruction 'psignd' {SSSE3}.
+ kIdPsignw, //!< Instruction 'psignw' {SSSE3}.
+ kIdPslld, //!< Instruction 'pslld' {MMX|SSE2}.
+ kIdPslldq, //!< Instruction 'pslldq' {SSE2}.
+ kIdPsllq, //!< Instruction 'psllq' {MMX|SSE2}.
+ kIdPsllw, //!< Instruction 'psllw' {MMX|SSE2}.
+ kIdPsrad, //!< Instruction 'psrad' {MMX|SSE2}.
+ kIdPsraw, //!< Instruction 'psraw' {MMX|SSE2}.
+ kIdPsrld, //!< Instruction 'psrld' {MMX|SSE2}.
+ kIdPsrldq, //!< Instruction 'psrldq' {SSE2}.
+ kIdPsrlq, //!< Instruction 'psrlq' {MMX|SSE2}.
+ kIdPsrlw, //!< Instruction 'psrlw' {MMX|SSE2}.
+ kIdPsubb, //!< Instruction 'psubb' {MMX|SSE2}.
+ kIdPsubd, //!< Instruction 'psubd' {MMX|SSE2}.
+ kIdPsubq, //!< Instruction 'psubq' {SSE2}.
+ kIdPsubsb, //!< Instruction 'psubsb' {MMX|SSE2}.
+ kIdPsubsw, //!< Instruction 'psubsw' {MMX|SSE2}.
+ kIdPsubusb, //!< Instruction 'psubusb' {MMX|SSE2}.
+ kIdPsubusw, //!< Instruction 'psubusw' {MMX|SSE2}.
+ kIdPsubw, //!< Instruction 'psubw' {MMX|SSE2}.
+ kIdPswapd, //!< Instruction 'pswapd' {3DNOW2}.
+ kIdPtest, //!< Instruction 'ptest' {SSE4_1}.
+ kIdPunpckhbw, //!< Instruction 'punpckhbw' {MMX|SSE2}.
+ kIdPunpckhdq, //!< Instruction 'punpckhdq' {MMX|SSE2}.
+ kIdPunpckhqdq, //!< Instruction 'punpckhqdq' {SSE2}.
+ kIdPunpckhwd, //!< Instruction 'punpckhwd' {MMX|SSE2}.
+ kIdPunpcklbw, //!< Instruction 'punpcklbw' {MMX|SSE2}.
+ kIdPunpckldq, //!< Instruction 'punpckldq' {MMX|SSE2}.
+ kIdPunpcklqdq, //!< Instruction 'punpcklqdq' {SSE2}.
+ kIdPunpcklwd, //!< Instruction 'punpcklwd' {MMX|SSE2}.
+ kIdPush, //!< Instruction 'push'.
+ kIdPusha, //!< Instruction 'pusha' (X86).
+ kIdPushad, //!< Instruction 'pushad' (X86).
+ kIdPushf, //!< Instruction 'pushf'.
+ kIdPushfd, //!< Instruction 'pushfd' (X86).
+ kIdPushfq, //!< Instruction 'pushfq' (X64).
+ kIdPxor, //!< Instruction 'pxor' {MMX|SSE2}.
+ kIdRcl, //!< Instruction 'rcl'.
+ kIdRcpps, //!< Instruction 'rcpps' {SSE}.
+ kIdRcpss, //!< Instruction 'rcpss' {SSE}.
+ kIdRcr, //!< Instruction 'rcr'.
+ kIdRdfsbase, //!< Instruction 'rdfsbase' {FSGSBASE} (X64).
+ kIdRdgsbase, //!< Instruction 'rdgsbase' {FSGSBASE} (X64).
+ kIdRdmsr, //!< Instruction 'rdmsr' {MSR}.
+ kIdRdpid, //!< Instruction 'rdpid' {RDPID}.
+ kIdRdpmc, //!< Instruction 'rdpmc'.
+ kIdRdrand, //!< Instruction 'rdrand' {RDRAND}.
+ kIdRdseed, //!< Instruction 'rdseed' {RDSEED}.
+ kIdRdtsc, //!< Instruction 'rdtsc' {RDTSC}.
+ kIdRdtscp, //!< Instruction 'rdtscp' {RDTSCP}.
+ kIdRet, //!< Instruction 'ret'.
+ kIdRol, //!< Instruction 'rol'.
+ kIdRor, //!< Instruction 'ror'.
+ kIdRorx, //!< Instruction 'rorx' {BMI2}.
+ kIdRoundpd, //!< Instruction 'roundpd' {SSE4_1}.
+ kIdRoundps, //!< Instruction 'roundps' {SSE4_1}.
+ kIdRoundsd, //!< Instruction 'roundsd' {SSE4_1}.
+ kIdRoundss, //!< Instruction 'roundss' {SSE4_1}.
+ kIdRsm, //!< Instruction 'rsm' (X86).
+ kIdRsqrtps, //!< Instruction 'rsqrtps' {SSE}.
+ kIdRsqrtss, //!< Instruction 'rsqrtss' {SSE}.
+ kIdSahf, //!< Instruction 'sahf' {LAHFSAHF}.
+ kIdSal, //!< Instruction 'sal'.
+ kIdSar, //!< Instruction 'sar'.
+ kIdSarx, //!< Instruction 'sarx' {BMI2}.
+ kIdSbb, //!< Instruction 'sbb'.
+ kIdScas, //!< Instruction 'scas'.
+ kIdSeta, //!< Instruction 'seta'.
+ kIdSetae, //!< Instruction 'setae'.
+ kIdSetb, //!< Instruction 'setb'.
+ kIdSetbe, //!< Instruction 'setbe'.
+ kIdSetc, //!< Instruction 'setc'.
+ kIdSete, //!< Instruction 'sete'.
+ kIdSetg, //!< Instruction 'setg'.
+ kIdSetge, //!< Instruction 'setge'.
+ kIdSetl, //!< Instruction 'setl'.
+ kIdSetle, //!< Instruction 'setle'.
+ kIdSetna, //!< Instruction 'setna'.
+ kIdSetnae, //!< Instruction 'setnae'.
+ kIdSetnb, //!< Instruction 'setnb'.
+ kIdSetnbe, //!< Instruction 'setnbe'.
+ kIdSetnc, //!< Instruction 'setnc'.
+ kIdSetne, //!< Instruction 'setne'.
+ kIdSetng, //!< Instruction 'setng'.
+ kIdSetnge, //!< Instruction 'setnge'.
+ kIdSetnl, //!< Instruction 'setnl'.
+ kIdSetnle, //!< Instruction 'setnle'.
+ kIdSetno, //!< Instruction 'setno'.
+ kIdSetnp, //!< Instruction 'setnp'.
+ kIdSetns, //!< Instruction 'setns'.
+ kIdSetnz, //!< Instruction 'setnz'.
+ kIdSeto, //!< Instruction 'seto'.
+ kIdSetp, //!< Instruction 'setp'.
+ kIdSetpe, //!< Instruction 'setpe'.
+ kIdSetpo, //!< Instruction 'setpo'.
+ kIdSets, //!< Instruction 'sets'.
+ kIdSetz, //!< Instruction 'setz'.
+ kIdSfence, //!< Instruction 'sfence' {MMX2}.
+ kIdSgdt, //!< Instruction 'sgdt'.
+ kIdSha1msg1, //!< Instruction 'sha1msg1' {SHA}.
+ kIdSha1msg2, //!< Instruction 'sha1msg2' {SHA}.
+ kIdSha1nexte, //!< Instruction 'sha1nexte' {SHA}.
+ kIdSha1rnds4, //!< Instruction 'sha1rnds4' {SHA}.
+ kIdSha256msg1, //!< Instruction 'sha256msg1' {SHA}.
+ kIdSha256msg2, //!< Instruction 'sha256msg2' {SHA}.
+ kIdSha256rnds2, //!< Instruction 'sha256rnds2' {SHA}.
+ kIdShl, //!< Instruction 'shl'.
+ kIdShld, //!< Instruction 'shld'.
+ kIdShlx, //!< Instruction 'shlx' {BMI2}.
+ kIdShr, //!< Instruction 'shr'.
+ kIdShrd, //!< Instruction 'shrd'.
+ kIdShrx, //!< Instruction 'shrx' {BMI2}.
+ kIdShufpd, //!< Instruction 'shufpd' {SSE2}.
+ kIdShufps, //!< Instruction 'shufps' {SSE}.
+ kIdSidt, //!< Instruction 'sidt'.
+ kIdSkinit, //!< Instruction 'skinit' {SKINIT}.
+ kIdSldt, //!< Instruction 'sldt'.
+ kIdSlwpcb, //!< Instruction 'slwpcb' {LWP}.
+ kIdSmsw, //!< Instruction 'smsw'.
+ kIdSqrtpd, //!< Instruction 'sqrtpd' {SSE2}.
+ kIdSqrtps, //!< Instruction 'sqrtps' {SSE}.
+ kIdSqrtsd, //!< Instruction 'sqrtsd' {SSE2}.
+ kIdSqrtss, //!< Instruction 'sqrtss' {SSE}.
+ kIdStac, //!< Instruction 'stac' {SMAP}.
+ kIdStc, //!< Instruction 'stc'.
+ kIdStd, //!< Instruction 'std'.
+ kIdStgi, //!< Instruction 'stgi' {SKINIT}.
+ kIdSti, //!< Instruction 'sti'.
+ kIdStmxcsr, //!< Instruction 'stmxcsr' {SSE}.
+ kIdStos, //!< Instruction 'stos'.
+ kIdStr, //!< Instruction 'str'.
+ kIdSub, //!< Instruction 'sub'.
+ kIdSubpd, //!< Instruction 'subpd' {SSE2}.
+ kIdSubps, //!< Instruction 'subps' {SSE}.
+ kIdSubsd, //!< Instruction 'subsd' {SSE2}.
+ kIdSubss, //!< Instruction 'subss' {SSE}.
+ kIdSwapgs, //!< Instruction 'swapgs' (X64).
+ kIdSyscall, //!< Instruction 'syscall' (X64).
+ kIdSysenter, //!< Instruction 'sysenter'.
+ kIdSysexit, //!< Instruction 'sysexit'.
+ kIdSysexit64, //!< Instruction 'sysexit64'.
+ kIdSysret, //!< Instruction 'sysret' (X64).
+ kIdSysret64, //!< Instruction 'sysret64' (X64).
+ kIdT1mskc, //!< Instruction 't1mskc' {TBM}.
+ kIdTest, //!< Instruction 'test'.
+ kIdTzcnt, //!< Instruction 'tzcnt' {BMI}.
+ kIdTzmsk, //!< Instruction 'tzmsk' {TBM}.
+ kIdUcomisd, //!< Instruction 'ucomisd' {SSE2}.
+ kIdUcomiss, //!< Instruction 'ucomiss' {SSE}.
+ kIdUd2, //!< Instruction 'ud2'.
+ kIdUnpckhpd, //!< Instruction 'unpckhpd' {SSE2}.
+ kIdUnpckhps, //!< Instruction 'unpckhps' {SSE}.
+ kIdUnpcklpd, //!< Instruction 'unpcklpd' {SSE2}.
+ kIdUnpcklps, //!< Instruction 'unpcklps' {SSE}.
+ kIdV4fmaddps, //!< Instruction 'v4fmaddps' {AVX512_4FMAPS}.
+ kIdV4fmaddss, //!< Instruction 'v4fmaddss' {AVX512_4FMAPS}.
+ kIdV4fnmaddps, //!< Instruction 'v4fnmaddps' {AVX512_4FMAPS}.
+ kIdV4fnmaddss, //!< Instruction 'v4fnmaddss' {AVX512_4FMAPS}.
+ kIdVaddpd, //!< Instruction 'vaddpd' {AVX|AVX512_F+VL}.
+ kIdVaddps, //!< Instruction 'vaddps' {AVX|AVX512_F+VL}.
+ kIdVaddsd, //!< Instruction 'vaddsd' {AVX|AVX512_F}.
+ kIdVaddss, //!< Instruction 'vaddss' {AVX|AVX512_F}.
+ kIdVaddsubpd, //!< Instruction 'vaddsubpd' {AVX}.
+ kIdVaddsubps, //!< Instruction 'vaddsubps' {AVX}.
+ kIdVaesdec, //!< Instruction 'vaesdec' {AVX|AVX512_F+VL & AESNI|VAES}.
+ kIdVaesdeclast, //!< Instruction 'vaesdeclast' {AVX|AVX512_F+VL & AESNI|VAES}.
+ kIdVaesenc, //!< Instruction 'vaesenc' {AVX|AVX512_F+VL & AESNI|VAES}.
+ kIdVaesenclast, //!< Instruction 'vaesenclast' {AVX|AVX512_F+VL & AESNI|VAES}.
+ kIdVaesimc, //!< Instruction 'vaesimc' {AVX & AESNI}.
+ kIdVaeskeygenassist, //!< Instruction 'vaeskeygenassist' {AVX & AESNI}.
+ kIdValignd, //!< Instruction 'valignd' {AVX512_F+VL}.
+ kIdValignq, //!< Instruction 'valignq' {AVX512_F+VL}.
+ kIdVandnpd, //!< Instruction 'vandnpd' {AVX|AVX512_DQ+VL}.
+ kIdVandnps, //!< Instruction 'vandnps' {AVX|AVX512_DQ+VL}.
+ kIdVandpd, //!< Instruction 'vandpd' {AVX|AVX512_DQ+VL}.
+ kIdVandps, //!< Instruction 'vandps' {AVX|AVX512_DQ+VL}.
+ kIdVblendmb, //!< Instruction 'vblendmb' {AVX512_BW+VL}.
+ kIdVblendmd, //!< Instruction 'vblendmd' {AVX512_F+VL}.
+ kIdVblendmpd, //!< Instruction 'vblendmpd' {AVX512_F+VL}.
+ kIdVblendmps, //!< Instruction 'vblendmps' {AVX512_F+VL}.
+ kIdVblendmq, //!< Instruction 'vblendmq' {AVX512_F+VL}.
+ kIdVblendmw, //!< Instruction 'vblendmw' {AVX512_BW+VL}.
+ kIdVblendpd, //!< Instruction 'vblendpd' {AVX}.
+ kIdVblendps, //!< Instruction 'vblendps' {AVX}.
+ kIdVblendvpd, //!< Instruction 'vblendvpd' {AVX}.
+ kIdVblendvps, //!< Instruction 'vblendvps' {AVX}.
+ kIdVbroadcastf128, //!< Instruction 'vbroadcastf128' {AVX}.
+ kIdVbroadcastf32x2, //!< Instruction 'vbroadcastf32x2' {AVX512_DQ+VL}.
+ kIdVbroadcastf32x4, //!< Instruction 'vbroadcastf32x4' {AVX512_F}.
+ kIdVbroadcastf32x8, //!< Instruction 'vbroadcastf32x8' {AVX512_DQ}.
+ kIdVbroadcastf64x2, //!< Instruction 'vbroadcastf64x2' {AVX512_DQ+VL}.
+ kIdVbroadcastf64x4, //!< Instruction 'vbroadcastf64x4' {AVX512_F}.
+ kIdVbroadcasti128, //!< Instruction 'vbroadcasti128' {AVX2}.
+ kIdVbroadcasti32x2, //!< Instruction 'vbroadcasti32x2' {AVX512_DQ+VL}.
+ kIdVbroadcasti32x4, //!< Instruction 'vbroadcasti32x4' {AVX512_F+VL}.
+ kIdVbroadcasti32x8, //!< Instruction 'vbroadcasti32x8' {AVX512_DQ}.
+ kIdVbroadcasti64x2, //!< Instruction 'vbroadcasti64x2' {AVX512_DQ+VL}.
+ kIdVbroadcasti64x4, //!< Instruction 'vbroadcasti64x4' {AVX512_F}.
+ kIdVbroadcastsd, //!< Instruction 'vbroadcastsd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVbroadcastss, //!< Instruction 'vbroadcastss' {AVX|AVX2|AVX512_F+VL}.
+ kIdVcmppd, //!< Instruction 'vcmppd' {AVX|AVX512_F+VL}.
+ kIdVcmpps, //!< Instruction 'vcmpps' {AVX|AVX512_F+VL}.
+ kIdVcmpsd, //!< Instruction 'vcmpsd' {AVX|AVX512_F}.
+ kIdVcmpss, //!< Instruction 'vcmpss' {AVX|AVX512_F}.
+ kIdVcomisd, //!< Instruction 'vcomisd' {AVX|AVX512_F}.
+ kIdVcomiss, //!< Instruction 'vcomiss' {AVX|AVX512_F}.
+ kIdVcompresspd, //!< Instruction 'vcompresspd' {AVX512_F+VL}.
+ kIdVcompressps, //!< Instruction 'vcompressps' {AVX512_F+VL}.
+ kIdVcvtdq2pd, //!< Instruction 'vcvtdq2pd' {AVX|AVX512_F+VL}.
+ kIdVcvtdq2ps, //!< Instruction 'vcvtdq2ps' {AVX|AVX512_F+VL}.
+ kIdVcvtne2ps2bf16, //!< Instruction 'vcvtne2ps2bf16' {AVX512_BF16+VL}.
+ kIdVcvtneps2bf16, //!< Instruction 'vcvtneps2bf16' {AVX512_BF16+VL}.
+ kIdVcvtpd2dq, //!< Instruction 'vcvtpd2dq' {AVX|AVX512_F+VL}.
+ kIdVcvtpd2ps, //!< Instruction 'vcvtpd2ps' {AVX|AVX512_F+VL}.
+ kIdVcvtpd2qq, //!< Instruction 'vcvtpd2qq' {AVX512_DQ+VL}.
+ kIdVcvtpd2udq, //!< Instruction 'vcvtpd2udq' {AVX512_F+VL}.
+ kIdVcvtpd2uqq, //!< Instruction 'vcvtpd2uqq' {AVX512_DQ+VL}.
+ kIdVcvtph2ps, //!< Instruction 'vcvtph2ps' {AVX512_F+VL & F16C}.
+ kIdVcvtps2dq, //!< Instruction 'vcvtps2dq' {AVX|AVX512_F+VL}.
+ kIdVcvtps2pd, //!< Instruction 'vcvtps2pd' {AVX|AVX512_F+VL}.
+ kIdVcvtps2ph, //!< Instruction 'vcvtps2ph' {AVX512_F+VL & F16C}.
+ kIdVcvtps2qq, //!< Instruction 'vcvtps2qq' {AVX512_DQ+VL}.
+ kIdVcvtps2udq, //!< Instruction 'vcvtps2udq' {AVX512_F+VL}.
+ kIdVcvtps2uqq, //!< Instruction 'vcvtps2uqq' {AVX512_DQ+VL}.
+ kIdVcvtqq2pd, //!< Instruction 'vcvtqq2pd' {AVX512_DQ+VL}.
+ kIdVcvtqq2ps, //!< Instruction 'vcvtqq2ps' {AVX512_DQ+VL}.
+ kIdVcvtsd2si, //!< Instruction 'vcvtsd2si' {AVX|AVX512_F}.
+ kIdVcvtsd2ss, //!< Instruction 'vcvtsd2ss' {AVX|AVX512_F}.
+ kIdVcvtsd2usi, //!< Instruction 'vcvtsd2usi' {AVX512_F}.
+ kIdVcvtsi2sd, //!< Instruction 'vcvtsi2sd' {AVX|AVX512_F}.
+ kIdVcvtsi2ss, //!< Instruction 'vcvtsi2ss' {AVX|AVX512_F}.
+ kIdVcvtss2sd, //!< Instruction 'vcvtss2sd' {AVX|AVX512_F}.
+ kIdVcvtss2si, //!< Instruction 'vcvtss2si' {AVX|AVX512_F}.
+ kIdVcvtss2usi, //!< Instruction 'vcvtss2usi' {AVX512_F}.
+ kIdVcvttpd2dq, //!< Instruction 'vcvttpd2dq' {AVX|AVX512_F+VL}.
+ kIdVcvttpd2qq, //!< Instruction 'vcvttpd2qq' {AVX512_F+VL}.
+ kIdVcvttpd2udq, //!< Instruction 'vcvttpd2udq' {AVX512_F+VL}.
+ kIdVcvttpd2uqq, //!< Instruction 'vcvttpd2uqq' {AVX512_DQ+VL}.
+ kIdVcvttps2dq, //!< Instruction 'vcvttps2dq' {AVX|AVX512_F+VL}.
+ kIdVcvttps2qq, //!< Instruction 'vcvttps2qq' {AVX512_DQ+VL}.
+ kIdVcvttps2udq, //!< Instruction 'vcvttps2udq' {AVX512_F+VL}.
+ kIdVcvttps2uqq, //!< Instruction 'vcvttps2uqq' {AVX512_DQ+VL}.
+ kIdVcvttsd2si, //!< Instruction 'vcvttsd2si' {AVX|AVX512_F}.
+ kIdVcvttsd2usi, //!< Instruction 'vcvttsd2usi' {AVX512_F}.
+ kIdVcvttss2si, //!< Instruction 'vcvttss2si' {AVX|AVX512_F}.
+ kIdVcvttss2usi, //!< Instruction 'vcvttss2usi' {AVX512_F}.
+ kIdVcvtudq2pd, //!< Instruction 'vcvtudq2pd' {AVX512_F+VL}.
+ kIdVcvtudq2ps, //!< Instruction 'vcvtudq2ps' {AVX512_F+VL}.
+ kIdVcvtuqq2pd, //!< Instruction 'vcvtuqq2pd' {AVX512_DQ+VL}.
+ kIdVcvtuqq2ps, //!< Instruction 'vcvtuqq2ps' {AVX512_DQ+VL}.
+ kIdVcvtusi2sd, //!< Instruction 'vcvtusi2sd' {AVX512_F}.
+ kIdVcvtusi2ss, //!< Instruction 'vcvtusi2ss' {AVX512_F}.
+ kIdVdbpsadbw, //!< Instruction 'vdbpsadbw' {AVX512_BW+VL}.
+ kIdVdivpd, //!< Instruction 'vdivpd' {AVX|AVX512_F+VL}.
+ kIdVdivps, //!< Instruction 'vdivps' {AVX|AVX512_F+VL}.
+ kIdVdivsd, //!< Instruction 'vdivsd' {AVX|AVX512_F}.
+ kIdVdivss, //!< Instruction 'vdivss' {AVX|AVX512_F}.
+ kIdVdpbf16ps, //!< Instruction 'vdpbf16ps' {AVX512_BF16+VL}.
+ kIdVdppd, //!< Instruction 'vdppd' {AVX}.
+ kIdVdpps, //!< Instruction 'vdpps' {AVX}.
+ kIdVerr, //!< Instruction 'verr'.
+ kIdVerw, //!< Instruction 'verw'.
+ kIdVexp2pd, //!< Instruction 'vexp2pd' {AVX512_ERI}.
+ kIdVexp2ps, //!< Instruction 'vexp2ps' {AVX512_ERI}.
+ kIdVexpandpd, //!< Instruction 'vexpandpd' {AVX512_F+VL}.
+ kIdVexpandps, //!< Instruction 'vexpandps' {AVX512_F+VL}.
+ kIdVextractf128, //!< Instruction 'vextractf128' {AVX}.
+ kIdVextractf32x4, //!< Instruction 'vextractf32x4' {AVX512_F+VL}.
+ kIdVextractf32x8, //!< Instruction 'vextractf32x8' {AVX512_DQ}.
+ kIdVextractf64x2, //!< Instruction 'vextractf64x2' {AVX512_DQ+VL}.
+ kIdVextractf64x4, //!< Instruction 'vextractf64x4' {AVX512_F}.
+ kIdVextracti128, //!< Instruction 'vextracti128' {AVX2}.
+ kIdVextracti32x4, //!< Instruction 'vextracti32x4' {AVX512_F+VL}.
+ kIdVextracti32x8, //!< Instruction 'vextracti32x8' {AVX512_DQ}.
+ kIdVextracti64x2, //!< Instruction 'vextracti64x2' {AVX512_DQ+VL}.
+ kIdVextracti64x4, //!< Instruction 'vextracti64x4' {AVX512_F}.
+ kIdVextractps, //!< Instruction 'vextractps' {AVX|AVX512_F}.
+ kIdVfixupimmpd, //!< Instruction 'vfixupimmpd' {AVX512_F+VL}.
+ kIdVfixupimmps, //!< Instruction 'vfixupimmps' {AVX512_F+VL}.
+ kIdVfixupimmsd, //!< Instruction 'vfixupimmsd' {AVX512_F}.
+ kIdVfixupimmss, //!< Instruction 'vfixupimmss' {AVX512_F}.
+ kIdVfmadd132pd, //!< Instruction 'vfmadd132pd' {FMA|AVX512_F+VL}.
+ kIdVfmadd132ps, //!< Instruction 'vfmadd132ps' {FMA|AVX512_F+VL}.
+ kIdVfmadd132sd, //!< Instruction 'vfmadd132sd' {FMA|AVX512_F}.
+ kIdVfmadd132ss, //!< Instruction 'vfmadd132ss' {FMA|AVX512_F}.
+ kIdVfmadd213pd, //!< Instruction 'vfmadd213pd' {FMA|AVX512_F+VL}.
+ kIdVfmadd213ps, //!< Instruction 'vfmadd213ps' {FMA|AVX512_F+VL}.
+ kIdVfmadd213sd, //!< Instruction 'vfmadd213sd' {FMA|AVX512_F}.
+ kIdVfmadd213ss, //!< Instruction 'vfmadd213ss' {FMA|AVX512_F}.
+ kIdVfmadd231pd, //!< Instruction 'vfmadd231pd' {FMA|AVX512_F+VL}.
+ kIdVfmadd231ps, //!< Instruction 'vfmadd231ps' {FMA|AVX512_F+VL}.
+ kIdVfmadd231sd, //!< Instruction 'vfmadd231sd' {FMA|AVX512_F}.
+ kIdVfmadd231ss, //!< Instruction 'vfmadd231ss' {FMA|AVX512_F}.
+ kIdVfmaddpd, //!< Instruction 'vfmaddpd' {FMA4}.
+ kIdVfmaddps, //!< Instruction 'vfmaddps' {FMA4}.
+ kIdVfmaddsd, //!< Instruction 'vfmaddsd' {FMA4}.
+ kIdVfmaddss, //!< Instruction 'vfmaddss' {FMA4}.
+ kIdVfmaddsub132pd, //!< Instruction 'vfmaddsub132pd' {FMA|AVX512_F+VL}.
+ kIdVfmaddsub132ps, //!< Instruction 'vfmaddsub132ps' {FMA|AVX512_F+VL}.
+ kIdVfmaddsub213pd, //!< Instruction 'vfmaddsub213pd' {FMA|AVX512_F+VL}.
+ kIdVfmaddsub213ps, //!< Instruction 'vfmaddsub213ps' {FMA|AVX512_F+VL}.
+ kIdVfmaddsub231pd, //!< Instruction 'vfmaddsub231pd' {FMA|AVX512_F+VL}.
+ kIdVfmaddsub231ps, //!< Instruction 'vfmaddsub231ps' {FMA|AVX512_F+VL}.
+ kIdVfmaddsubpd, //!< Instruction 'vfmaddsubpd' {FMA4}.
+ kIdVfmaddsubps, //!< Instruction 'vfmaddsubps' {FMA4}.
+ kIdVfmsub132pd, //!< Instruction 'vfmsub132pd' {FMA|AVX512_F+VL}.
+ kIdVfmsub132ps, //!< Instruction 'vfmsub132ps' {FMA|AVX512_F+VL}.
+ kIdVfmsub132sd, //!< Instruction 'vfmsub132sd' {FMA|AVX512_F}.
+ kIdVfmsub132ss, //!< Instruction 'vfmsub132ss' {FMA|AVX512_F}.
+ kIdVfmsub213pd, //!< Instruction 'vfmsub213pd' {FMA|AVX512_F+VL}.
+ kIdVfmsub213ps, //!< Instruction 'vfmsub213ps' {FMA|AVX512_F+VL}.
+ kIdVfmsub213sd, //!< Instruction 'vfmsub213sd' {FMA|AVX512_F}.
+ kIdVfmsub213ss, //!< Instruction 'vfmsub213ss' {FMA|AVX512_F}.
+ kIdVfmsub231pd, //!< Instruction 'vfmsub231pd' {FMA|AVX512_F+VL}.
+ kIdVfmsub231ps, //!< Instruction 'vfmsub231ps' {FMA|AVX512_F+VL}.
+ kIdVfmsub231sd, //!< Instruction 'vfmsub231sd' {FMA|AVX512_F}.
+ kIdVfmsub231ss, //!< Instruction 'vfmsub231ss' {FMA|AVX512_F}.
+ kIdVfmsubadd132pd, //!< Instruction 'vfmsubadd132pd' {FMA|AVX512_F+VL}.
+ kIdVfmsubadd132ps, //!< Instruction 'vfmsubadd132ps' {FMA|AVX512_F+VL}.
+ kIdVfmsubadd213pd, //!< Instruction 'vfmsubadd213pd' {FMA|AVX512_F+VL}.
+ kIdVfmsubadd213ps, //!< Instruction 'vfmsubadd213ps' {FMA|AVX512_F+VL}.
+ kIdVfmsubadd231pd, //!< Instruction 'vfmsubadd231pd' {FMA|AVX512_F+VL}.
+ kIdVfmsubadd231ps, //!< Instruction 'vfmsubadd231ps' {FMA|AVX512_F+VL}.
+ kIdVfmsubaddpd, //!< Instruction 'vfmsubaddpd' {FMA4}.
+ kIdVfmsubaddps, //!< Instruction 'vfmsubaddps' {FMA4}.
+ kIdVfmsubpd, //!< Instruction 'vfmsubpd' {FMA4}.
+ kIdVfmsubps, //!< Instruction 'vfmsubps' {FMA4}.
+ kIdVfmsubsd, //!< Instruction 'vfmsubsd' {FMA4}.
+ kIdVfmsubss, //!< Instruction 'vfmsubss' {FMA4}.
+ kIdVfnmadd132pd, //!< Instruction 'vfnmadd132pd' {FMA|AVX512_F+VL}.
+ kIdVfnmadd132ps, //!< Instruction 'vfnmadd132ps' {FMA|AVX512_F+VL}.
+ kIdVfnmadd132sd, //!< Instruction 'vfnmadd132sd' {FMA|AVX512_F}.
+ kIdVfnmadd132ss, //!< Instruction 'vfnmadd132ss' {FMA|AVX512_F}.
+ kIdVfnmadd213pd, //!< Instruction 'vfnmadd213pd' {FMA|AVX512_F+VL}.
+ kIdVfnmadd213ps, //!< Instruction 'vfnmadd213ps' {FMA|AVX512_F+VL}.
+ kIdVfnmadd213sd, //!< Instruction 'vfnmadd213sd' {FMA|AVX512_F}.
+ kIdVfnmadd213ss, //!< Instruction 'vfnmadd213ss' {FMA|AVX512_F}.
+ kIdVfnmadd231pd, //!< Instruction 'vfnmadd231pd' {FMA|AVX512_F+VL}.
+ kIdVfnmadd231ps, //!< Instruction 'vfnmadd231ps' {FMA|AVX512_F+VL}.
+ kIdVfnmadd231sd, //!< Instruction 'vfnmadd231sd' {FMA|AVX512_F}.
+ kIdVfnmadd231ss, //!< Instruction 'vfnmadd231ss' {FMA|AVX512_F}.
+ kIdVfnmaddpd, //!< Instruction 'vfnmaddpd' {FMA4}.
+ kIdVfnmaddps, //!< Instruction 'vfnmaddps' {FMA4}.
+ kIdVfnmaddsd, //!< Instruction 'vfnmaddsd' {FMA4}.
+ kIdVfnmaddss, //!< Instruction 'vfnmaddss' {FMA4}.
+ kIdVfnmsub132pd, //!< Instruction 'vfnmsub132pd' {FMA|AVX512_F+VL}.
+ kIdVfnmsub132ps, //!< Instruction 'vfnmsub132ps' {FMA|AVX512_F+VL}.
+ kIdVfnmsub132sd, //!< Instruction 'vfnmsub132sd' {FMA|AVX512_F}.
+ kIdVfnmsub132ss, //!< Instruction 'vfnmsub132ss' {FMA|AVX512_F}.
+ kIdVfnmsub213pd, //!< Instruction 'vfnmsub213pd' {FMA|AVX512_F+VL}.
+ kIdVfnmsub213ps, //!< Instruction 'vfnmsub213ps' {FMA|AVX512_F+VL}.
+ kIdVfnmsub213sd, //!< Instruction 'vfnmsub213sd' {FMA|AVX512_F}.
+ kIdVfnmsub213ss, //!< Instruction 'vfnmsub213ss' {FMA|AVX512_F}.
+ kIdVfnmsub231pd, //!< Instruction 'vfnmsub231pd' {FMA|AVX512_F+VL}.
+ kIdVfnmsub231ps, //!< Instruction 'vfnmsub231ps' {FMA|AVX512_F+VL}.
+ kIdVfnmsub231sd, //!< Instruction 'vfnmsub231sd' {FMA|AVX512_F}.
+ kIdVfnmsub231ss, //!< Instruction 'vfnmsub231ss' {FMA|AVX512_F}.
+ kIdVfnmsubpd, //!< Instruction 'vfnmsubpd' {FMA4}.
+ kIdVfnmsubps, //!< Instruction 'vfnmsubps' {FMA4}.
+ kIdVfnmsubsd, //!< Instruction 'vfnmsubsd' {FMA4}.
+ kIdVfnmsubss, //!< Instruction 'vfnmsubss' {FMA4}.
+ kIdVfpclasspd, //!< Instruction 'vfpclasspd' {AVX512_DQ+VL}.
+ kIdVfpclassps, //!< Instruction 'vfpclassps' {AVX512_DQ+VL}.
+ kIdVfpclasssd, //!< Instruction 'vfpclasssd' {AVX512_DQ}.
+ kIdVfpclassss, //!< Instruction 'vfpclassss' {AVX512_DQ}.
+ kIdVfrczpd, //!< Instruction 'vfrczpd' {XOP}.
+ kIdVfrczps, //!< Instruction 'vfrczps' {XOP}.
+ kIdVfrczsd, //!< Instruction 'vfrczsd' {XOP}.
+ kIdVfrczss, //!< Instruction 'vfrczss' {XOP}.
+ kIdVgatherdpd, //!< Instruction 'vgatherdpd' {AVX2|AVX512_F+VL}.
+ kIdVgatherdps, //!< Instruction 'vgatherdps' {AVX2|AVX512_F+VL}.
+ kIdVgatherpf0dpd, //!< Instruction 'vgatherpf0dpd' {AVX512_PFI}.
+ kIdVgatherpf0dps, //!< Instruction 'vgatherpf0dps' {AVX512_PFI}.
+ kIdVgatherpf0qpd, //!< Instruction 'vgatherpf0qpd' {AVX512_PFI}.
+ kIdVgatherpf0qps, //!< Instruction 'vgatherpf0qps' {AVX512_PFI}.
+ kIdVgatherpf1dpd, //!< Instruction 'vgatherpf1dpd' {AVX512_PFI}.
+ kIdVgatherpf1dps, //!< Instruction 'vgatherpf1dps' {AVX512_PFI}.
+ kIdVgatherpf1qpd, //!< Instruction 'vgatherpf1qpd' {AVX512_PFI}.
+ kIdVgatherpf1qps, //!< Instruction 'vgatherpf1qps' {AVX512_PFI}.
+ kIdVgatherqpd, //!< Instruction 'vgatherqpd' {AVX2|AVX512_F+VL}.
+ kIdVgatherqps, //!< Instruction 'vgatherqps' {AVX2|AVX512_F+VL}.
+ kIdVgetexppd, //!< Instruction 'vgetexppd' {AVX512_F+VL}.
+ kIdVgetexpps, //!< Instruction 'vgetexpps' {AVX512_F+VL}.
+ kIdVgetexpsd, //!< Instruction 'vgetexpsd' {AVX512_F}.
+ kIdVgetexpss, //!< Instruction 'vgetexpss' {AVX512_F}.
+ kIdVgetmantpd, //!< Instruction 'vgetmantpd' {AVX512_F+VL}.
+ kIdVgetmantps, //!< Instruction 'vgetmantps' {AVX512_F+VL}.
+ kIdVgetmantsd, //!< Instruction 'vgetmantsd' {AVX512_F}.
+ kIdVgetmantss, //!< Instruction 'vgetmantss' {AVX512_F}.
+ kIdVgf2p8affineinvqb, //!< Instruction 'vgf2p8affineinvqb' {AVX|AVX512_F+VL & GFNI}.
+ kIdVgf2p8affineqb, //!< Instruction 'vgf2p8affineqb' {AVX|AVX512_F+VL & GFNI}.
+ kIdVgf2p8mulb, //!< Instruction 'vgf2p8mulb' {AVX|AVX512_F+VL & GFNI}.
+ kIdVhaddpd, //!< Instruction 'vhaddpd' {AVX}.
+ kIdVhaddps, //!< Instruction 'vhaddps' {AVX}.
+ kIdVhsubpd, //!< Instruction 'vhsubpd' {AVX}.
+ kIdVhsubps, //!< Instruction 'vhsubps' {AVX}.
+ kIdVinsertf128, //!< Instruction 'vinsertf128' {AVX}.
+ kIdVinsertf32x4, //!< Instruction 'vinsertf32x4' {AVX512_F+VL}.
+ kIdVinsertf32x8, //!< Instruction 'vinsertf32x8' {AVX512_DQ}.
+ kIdVinsertf64x2, //!< Instruction 'vinsertf64x2' {AVX512_DQ+VL}.
+ kIdVinsertf64x4, //!< Instruction 'vinsertf64x4' {AVX512_F}.
+ kIdVinserti128, //!< Instruction 'vinserti128' {AVX2}.
+ kIdVinserti32x4, //!< Instruction 'vinserti32x4' {AVX512_F+VL}.
+ kIdVinserti32x8, //!< Instruction 'vinserti32x8' {AVX512_DQ}.
+ kIdVinserti64x2, //!< Instruction 'vinserti64x2' {AVX512_DQ+VL}.
+ kIdVinserti64x4, //!< Instruction 'vinserti64x4' {AVX512_F}.
+ kIdVinsertps, //!< Instruction 'vinsertps' {AVX|AVX512_F}.
+ kIdVlddqu, //!< Instruction 'vlddqu' {AVX}.
+ kIdVldmxcsr, //!< Instruction 'vldmxcsr' {AVX}.
+ kIdVmaskmovdqu, //!< Instruction 'vmaskmovdqu' {AVX}.
+ kIdVmaskmovpd, //!< Instruction 'vmaskmovpd' {AVX}.
+ kIdVmaskmovps, //!< Instruction 'vmaskmovps' {AVX}.
+ kIdVmaxpd, //!< Instruction 'vmaxpd' {AVX|AVX512_F+VL}.
+ kIdVmaxps, //!< Instruction 'vmaxps' {AVX|AVX512_F+VL}.
+ kIdVmaxsd, //!< Instruction 'vmaxsd' {AVX|AVX512_F+VL}.
+ kIdVmaxss, //!< Instruction 'vmaxss' {AVX|AVX512_F+VL}.
+ kIdVmcall, //!< Instruction 'vmcall' {VMX}.
+ kIdVmclear, //!< Instruction 'vmclear' {VMX}.
+ kIdVmfunc, //!< Instruction 'vmfunc' {VMX}.
+ kIdVminpd, //!< Instruction 'vminpd' {AVX|AVX512_F+VL}.
+ kIdVminps, //!< Instruction 'vminps' {AVX|AVX512_F+VL}.
+ kIdVminsd, //!< Instruction 'vminsd' {AVX|AVX512_F+VL}.
+ kIdVminss, //!< Instruction 'vminss' {AVX|AVX512_F+VL}.
+ kIdVmlaunch, //!< Instruction 'vmlaunch' {VMX}.
+ kIdVmload, //!< Instruction 'vmload' {SVM}.
+ kIdVmmcall, //!< Instruction 'vmmcall' {SVM}.
+ kIdVmovapd, //!< Instruction 'vmovapd' {AVX|AVX512_F+VL}.
+ kIdVmovaps, //!< Instruction 'vmovaps' {AVX|AVX512_F+VL}.
+ kIdVmovd, //!< Instruction 'vmovd' {AVX|AVX512_F}.
+ kIdVmovddup, //!< Instruction 'vmovddup' {AVX|AVX512_F+VL}.
+ kIdVmovdqa, //!< Instruction 'vmovdqa' {AVX}.
+ kIdVmovdqa32, //!< Instruction 'vmovdqa32' {AVX512_F+VL}.
+ kIdVmovdqa64, //!< Instruction 'vmovdqa64' {AVX512_F+VL}.
+ kIdVmovdqu, //!< Instruction 'vmovdqu' {AVX}.
+ kIdVmovdqu16, //!< Instruction 'vmovdqu16' {AVX512_BW+VL}.
+ kIdVmovdqu32, //!< Instruction 'vmovdqu32' {AVX512_F+VL}.
+ kIdVmovdqu64, //!< Instruction 'vmovdqu64' {AVX512_F+VL}.
+ kIdVmovdqu8, //!< Instruction 'vmovdqu8' {AVX512_BW+VL}.
+ kIdVmovhlps, //!< Instruction 'vmovhlps' {AVX|AVX512_F}.
+ kIdVmovhpd, //!< Instruction 'vmovhpd' {AVX|AVX512_F}.
+ kIdVmovhps, //!< Instruction 'vmovhps' {AVX|AVX512_F}.
+ kIdVmovlhps, //!< Instruction 'vmovlhps' {AVX|AVX512_F}.
+ kIdVmovlpd, //!< Instruction 'vmovlpd' {AVX|AVX512_F}.
+ kIdVmovlps, //!< Instruction 'vmovlps' {AVX|AVX512_F}.
+ kIdVmovmskpd, //!< Instruction 'vmovmskpd' {AVX}.
+ kIdVmovmskps, //!< Instruction 'vmovmskps' {AVX}.
+ kIdVmovntdq, //!< Instruction 'vmovntdq' {AVX|AVX512_F+VL}.
+ kIdVmovntdqa, //!< Instruction 'vmovntdqa' {AVX|AVX2|AVX512_F+VL}.
+ kIdVmovntpd, //!< Instruction 'vmovntpd' {AVX|AVX512_F+VL}.
+ kIdVmovntps, //!< Instruction 'vmovntps' {AVX|AVX512_F+VL}.
+ kIdVmovq, //!< Instruction 'vmovq' {AVX|AVX512_F}.
+ kIdVmovsd, //!< Instruction 'vmovsd' {AVX|AVX512_F}.
+ kIdVmovshdup, //!< Instruction 'vmovshdup' {AVX|AVX512_F+VL}.
+ kIdVmovsldup, //!< Instruction 'vmovsldup' {AVX|AVX512_F+VL}.
+ kIdVmovss, //!< Instruction 'vmovss' {AVX|AVX512_F}.
+ kIdVmovupd, //!< Instruction 'vmovupd' {AVX|AVX512_F+VL}.
+ kIdVmovups, //!< Instruction 'vmovups' {AVX|AVX512_F+VL}.
+ kIdVmpsadbw, //!< Instruction 'vmpsadbw' {AVX|AVX2}.
+ kIdVmptrld, //!< Instruction 'vmptrld' {VMX}.
+ kIdVmptrst, //!< Instruction 'vmptrst' {VMX}.
+ kIdVmread, //!< Instruction 'vmread' {VMX}.
+ kIdVmresume, //!< Instruction 'vmresume' {VMX}.
+ kIdVmrun, //!< Instruction 'vmrun' {SVM}.
+ kIdVmsave, //!< Instruction 'vmsave' {SVM}.
+ kIdVmulpd, //!< Instruction 'vmulpd' {AVX|AVX512_F+VL}.
+ kIdVmulps, //!< Instruction 'vmulps' {AVX|AVX512_F+VL}.
+ kIdVmulsd, //!< Instruction 'vmulsd' {AVX|AVX512_F}.
+ kIdVmulss, //!< Instruction 'vmulss' {AVX|AVX512_F}.
+ kIdVmwrite, //!< Instruction 'vmwrite' {VMX}.
+ kIdVmxon, //!< Instruction 'vmxon' {VMX}.
+ kIdVorpd, //!< Instruction 'vorpd' {AVX|AVX512_DQ+VL}.
+ kIdVorps, //!< Instruction 'vorps' {AVX|AVX512_DQ+VL}.
+ kIdVp4dpwssd, //!< Instruction 'vp4dpwssd' {AVX512_4VNNIW}.
+ kIdVp4dpwssds, //!< Instruction 'vp4dpwssds' {AVX512_4VNNIW}.
+ kIdVpabsb, //!< Instruction 'vpabsb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpabsd, //!< Instruction 'vpabsd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpabsq, //!< Instruction 'vpabsq' {AVX512_F+VL}.
+ kIdVpabsw, //!< Instruction 'vpabsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpackssdw, //!< Instruction 'vpackssdw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpacksswb, //!< Instruction 'vpacksswb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpackusdw, //!< Instruction 'vpackusdw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpackuswb, //!< Instruction 'vpackuswb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddb, //!< Instruction 'vpaddb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddd, //!< Instruction 'vpaddd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpaddq, //!< Instruction 'vpaddq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpaddsb, //!< Instruction 'vpaddsb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddsw, //!< Instruction 'vpaddsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddusb, //!< Instruction 'vpaddusb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddusw, //!< Instruction 'vpaddusw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpaddw, //!< Instruction 'vpaddw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpalignr, //!< Instruction 'vpalignr' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpand, //!< Instruction 'vpand' {AVX|AVX2}.
+ kIdVpandd, //!< Instruction 'vpandd' {AVX512_F+VL}.
+ kIdVpandn, //!< Instruction 'vpandn' {AVX|AVX2}.
+ kIdVpandnd, //!< Instruction 'vpandnd' {AVX512_F+VL}.
+ kIdVpandnq, //!< Instruction 'vpandnq' {AVX512_F+VL}.
+ kIdVpandq, //!< Instruction 'vpandq' {AVX512_F+VL}.
+ kIdVpavgb, //!< Instruction 'vpavgb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpavgw, //!< Instruction 'vpavgw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpblendd, //!< Instruction 'vpblendd' {AVX2}.
+ kIdVpblendvb, //!< Instruction 'vpblendvb' {AVX|AVX2}.
+ kIdVpblendw, //!< Instruction 'vpblendw' {AVX|AVX2}.
+ kIdVpbroadcastb, //!< Instruction 'vpbroadcastb' {AVX2|AVX512_BW+VL}.
+ kIdVpbroadcastd, //!< Instruction 'vpbroadcastd' {AVX2|AVX512_F+VL}.
+ kIdVpbroadcastmb2d, //!< Instruction 'vpbroadcastmb2d' {AVX512_CDI+VL}.
+ kIdVpbroadcastmb2q, //!< Instruction 'vpbroadcastmb2q' {AVX512_CDI+VL}.
+ kIdVpbroadcastq, //!< Instruction 'vpbroadcastq' {AVX2|AVX512_F+VL}.
+ kIdVpbroadcastw, //!< Instruction 'vpbroadcastw' {AVX2|AVX512_BW+VL}.
+ kIdVpclmulqdq, //!< Instruction 'vpclmulqdq' {AVX|AVX512_F+VL & PCLMULQDQ|VPCLMULQDQ}.
+ kIdVpcmov, //!< Instruction 'vpcmov' {XOP}.
+ kIdVpcmpb, //!< Instruction 'vpcmpb' {AVX512_BW+VL}.
+ kIdVpcmpd, //!< Instruction 'vpcmpd' {AVX512_F+VL}.
+ kIdVpcmpeqb, //!< Instruction 'vpcmpeqb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpcmpeqd, //!< Instruction 'vpcmpeqd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpcmpeqq, //!< Instruction 'vpcmpeqq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpcmpeqw, //!< Instruction 'vpcmpeqw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpcmpestri, //!< Instruction 'vpcmpestri' {AVX}.
+ kIdVpcmpestrm, //!< Instruction 'vpcmpestrm' {AVX}.
+ kIdVpcmpgtb, //!< Instruction 'vpcmpgtb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpcmpgtd, //!< Instruction 'vpcmpgtd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpcmpgtq, //!< Instruction 'vpcmpgtq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpcmpgtw, //!< Instruction 'vpcmpgtw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpcmpistri, //!< Instruction 'vpcmpistri' {AVX}.
+ kIdVpcmpistrm, //!< Instruction 'vpcmpistrm' {AVX}.
+ kIdVpcmpq, //!< Instruction 'vpcmpq' {AVX512_F+VL}.
+ kIdVpcmpub, //!< Instruction 'vpcmpub' {AVX512_BW+VL}.
+ kIdVpcmpud, //!< Instruction 'vpcmpud' {AVX512_F+VL}.
+ kIdVpcmpuq, //!< Instruction 'vpcmpuq' {AVX512_F+VL}.
+ kIdVpcmpuw, //!< Instruction 'vpcmpuw' {AVX512_BW+VL}.
+ kIdVpcmpw, //!< Instruction 'vpcmpw' {AVX512_BW+VL}.
+ kIdVpcomb, //!< Instruction 'vpcomb' {XOP}.
+ kIdVpcomd, //!< Instruction 'vpcomd' {XOP}.
+ kIdVpcompressb, //!< Instruction 'vpcompressb' {AVX512_VBMI2+VL}.
+ kIdVpcompressd, //!< Instruction 'vpcompressd' {AVX512_F+VL}.
+ kIdVpcompressq, //!< Instruction 'vpcompressq' {AVX512_F+VL}.
+ kIdVpcompressw, //!< Instruction 'vpcompressw' {AVX512_VBMI2+VL}.
+ kIdVpcomq, //!< Instruction 'vpcomq' {XOP}.
+ kIdVpcomub, //!< Instruction 'vpcomub' {XOP}.
+ kIdVpcomud, //!< Instruction 'vpcomud' {XOP}.
+ kIdVpcomuq, //!< Instruction 'vpcomuq' {XOP}.
+ kIdVpcomuw, //!< Instruction 'vpcomuw' {XOP}.
+ kIdVpcomw, //!< Instruction 'vpcomw' {XOP}.
+ kIdVpconflictd, //!< Instruction 'vpconflictd' {AVX512_CDI+VL}.
+ kIdVpconflictq, //!< Instruction 'vpconflictq' {AVX512_CDI+VL}.
+ kIdVpdpbusd, //!< Instruction 'vpdpbusd' {AVX512_VNNI+VL}.
+ kIdVpdpbusds, //!< Instruction 'vpdpbusds' {AVX512_VNNI+VL}.
+ kIdVpdpwssd, //!< Instruction 'vpdpwssd' {AVX512_VNNI+VL}.
+ kIdVpdpwssds, //!< Instruction 'vpdpwssds' {AVX512_VNNI+VL}.
+ kIdVperm2f128, //!< Instruction 'vperm2f128' {AVX}.
+ kIdVperm2i128, //!< Instruction 'vperm2i128' {AVX2}.
+ kIdVpermb, //!< Instruction 'vpermb' {AVX512_VBMI+VL}.
+ kIdVpermd, //!< Instruction 'vpermd' {AVX2|AVX512_F+VL}.
+ kIdVpermi2b, //!< Instruction 'vpermi2b' {AVX512_VBMI+VL}.
+ kIdVpermi2d, //!< Instruction 'vpermi2d' {AVX512_F+VL}.
+ kIdVpermi2pd, //!< Instruction 'vpermi2pd' {AVX512_F+VL}.
+ kIdVpermi2ps, //!< Instruction 'vpermi2ps' {AVX512_F+VL}.
+ kIdVpermi2q, //!< Instruction 'vpermi2q' {AVX512_F+VL}.
+ kIdVpermi2w, //!< Instruction 'vpermi2w' {AVX512_BW+VL}.
+ kIdVpermil2pd, //!< Instruction 'vpermil2pd' {XOP}.
+ kIdVpermil2ps, //!< Instruction 'vpermil2ps' {XOP}.
+ kIdVpermilpd, //!< Instruction 'vpermilpd' {AVX|AVX512_F+VL}.
+ kIdVpermilps, //!< Instruction 'vpermilps' {AVX|AVX512_F+VL}.
+ kIdVpermpd, //!< Instruction 'vpermpd' {AVX2|AVX512_F+VL}.
+ kIdVpermps, //!< Instruction 'vpermps' {AVX2|AVX512_F+VL}.
+ kIdVpermq, //!< Instruction 'vpermq' {AVX2|AVX512_F+VL}.
+ kIdVpermt2b, //!< Instruction 'vpermt2b' {AVX512_VBMI+VL}.
+ kIdVpermt2d, //!< Instruction 'vpermt2d' {AVX512_F+VL}.
+ kIdVpermt2pd, //!< Instruction 'vpermt2pd' {AVX512_F+VL}.
+ kIdVpermt2ps, //!< Instruction 'vpermt2ps' {AVX512_F+VL}.
+ kIdVpermt2q, //!< Instruction 'vpermt2q' {AVX512_F+VL}.
+ kIdVpermt2w, //!< Instruction 'vpermt2w' {AVX512_BW+VL}.
+ kIdVpermw, //!< Instruction 'vpermw' {AVX512_BW+VL}.
+ kIdVpexpandb, //!< Instruction 'vpexpandb' {AVX512_VBMI2+VL}.
+ kIdVpexpandd, //!< Instruction 'vpexpandd' {AVX512_F+VL}.
+ kIdVpexpandq, //!< Instruction 'vpexpandq' {AVX512_F+VL}.
+ kIdVpexpandw, //!< Instruction 'vpexpandw' {AVX512_VBMI2+VL}.
+ kIdVpextrb, //!< Instruction 'vpextrb' {AVX|AVX512_BW}.
+ kIdVpextrd, //!< Instruction 'vpextrd' {AVX|AVX512_DQ}.
+ kIdVpextrq, //!< Instruction 'vpextrq' {AVX|AVX512_DQ} (X64).
+ kIdVpextrw, //!< Instruction 'vpextrw' {AVX|AVX512_BW}.
+ kIdVpgatherdd, //!< Instruction 'vpgatherdd' {AVX2|AVX512_F+VL}.
+ kIdVpgatherdq, //!< Instruction 'vpgatherdq' {AVX2|AVX512_F+VL}.
+ kIdVpgatherqd, //!< Instruction 'vpgatherqd' {AVX2|AVX512_F+VL}.
+ kIdVpgatherqq, //!< Instruction 'vpgatherqq' {AVX2|AVX512_F+VL}.
+ kIdVphaddbd, //!< Instruction 'vphaddbd' {XOP}.
+ kIdVphaddbq, //!< Instruction 'vphaddbq' {XOP}.
+ kIdVphaddbw, //!< Instruction 'vphaddbw' {XOP}.
+ kIdVphaddd, //!< Instruction 'vphaddd' {AVX|AVX2}.
+ kIdVphadddq, //!< Instruction 'vphadddq' {XOP}.
+ kIdVphaddsw, //!< Instruction 'vphaddsw' {AVX|AVX2}.
+ kIdVphaddubd, //!< Instruction 'vphaddubd' {XOP}.
+ kIdVphaddubq, //!< Instruction 'vphaddubq' {XOP}.
+ kIdVphaddubw, //!< Instruction 'vphaddubw' {XOP}.
+ kIdVphaddudq, //!< Instruction 'vphaddudq' {XOP}.
+ kIdVphadduwd, //!< Instruction 'vphadduwd' {XOP}.
+ kIdVphadduwq, //!< Instruction 'vphadduwq' {XOP}.
+ kIdVphaddw, //!< Instruction 'vphaddw' {AVX|AVX2}.
+ kIdVphaddwd, //!< Instruction 'vphaddwd' {XOP}.
+ kIdVphaddwq, //!< Instruction 'vphaddwq' {XOP}.
+ kIdVphminposuw, //!< Instruction 'vphminposuw' {AVX}.
+ kIdVphsubbw, //!< Instruction 'vphsubbw' {XOP}.
+ kIdVphsubd, //!< Instruction 'vphsubd' {AVX|AVX2}.
+ kIdVphsubdq, //!< Instruction 'vphsubdq' {XOP}.
+ kIdVphsubsw, //!< Instruction 'vphsubsw' {AVX|AVX2}.
+ kIdVphsubw, //!< Instruction 'vphsubw' {AVX|AVX2}.
+ kIdVphsubwd, //!< Instruction 'vphsubwd' {XOP}.
+ kIdVpinsrb, //!< Instruction 'vpinsrb' {AVX|AVX512_BW}.
+ kIdVpinsrd, //!< Instruction 'vpinsrd' {AVX|AVX512_DQ}.
+ kIdVpinsrq, //!< Instruction 'vpinsrq' {AVX|AVX512_DQ} (X64).
+ kIdVpinsrw, //!< Instruction 'vpinsrw' {AVX|AVX512_BW}.
+ kIdVplzcntd, //!< Instruction 'vplzcntd' {AVX512_CDI+VL}.
+ kIdVplzcntq, //!< Instruction 'vplzcntq' {AVX512_CDI+VL}.
+ kIdVpmacsdd, //!< Instruction 'vpmacsdd' {XOP}.
+ kIdVpmacsdqh, //!< Instruction 'vpmacsdqh' {XOP}.
+ kIdVpmacsdql, //!< Instruction 'vpmacsdql' {XOP}.
+ kIdVpmacssdd, //!< Instruction 'vpmacssdd' {XOP}.
+ kIdVpmacssdqh, //!< Instruction 'vpmacssdqh' {XOP}.
+ kIdVpmacssdql, //!< Instruction 'vpmacssdql' {XOP}.
+ kIdVpmacsswd, //!< Instruction 'vpmacsswd' {XOP}.
+ kIdVpmacssww, //!< Instruction 'vpmacssww' {XOP}.
+ kIdVpmacswd, //!< Instruction 'vpmacswd' {XOP}.
+ kIdVpmacsww, //!< Instruction 'vpmacsww' {XOP}.
+ kIdVpmadcsswd, //!< Instruction 'vpmadcsswd' {XOP}.
+ kIdVpmadcswd, //!< Instruction 'vpmadcswd' {XOP}.
+ kIdVpmadd52huq, //!< Instruction 'vpmadd52huq' {AVX512_IFMA+VL}.
+ kIdVpmadd52luq, //!< Instruction 'vpmadd52luq' {AVX512_IFMA+VL}.
+ kIdVpmaddubsw, //!< Instruction 'vpmaddubsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmaddwd, //!< Instruction 'vpmaddwd' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmaskmovd, //!< Instruction 'vpmaskmovd' {AVX2}.
+ kIdVpmaskmovq, //!< Instruction 'vpmaskmovq' {AVX2}.
+ kIdVpmaxsb, //!< Instruction 'vpmaxsb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmaxsd, //!< Instruction 'vpmaxsd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmaxsq, //!< Instruction 'vpmaxsq' {AVX512_F+VL}.
+ kIdVpmaxsw, //!< Instruction 'vpmaxsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmaxub, //!< Instruction 'vpmaxub' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmaxud, //!< Instruction 'vpmaxud' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmaxuq, //!< Instruction 'vpmaxuq' {AVX512_F+VL}.
+ kIdVpmaxuw, //!< Instruction 'vpmaxuw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpminsb, //!< Instruction 'vpminsb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpminsd, //!< Instruction 'vpminsd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpminsq, //!< Instruction 'vpminsq' {AVX512_F+VL}.
+ kIdVpminsw, //!< Instruction 'vpminsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpminub, //!< Instruction 'vpminub' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpminud, //!< Instruction 'vpminud' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpminuq, //!< Instruction 'vpminuq' {AVX512_F+VL}.
+ kIdVpminuw, //!< Instruction 'vpminuw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmovb2m, //!< Instruction 'vpmovb2m' {AVX512_BW+VL}.
+ kIdVpmovd2m, //!< Instruction 'vpmovd2m' {AVX512_DQ+VL}.
+ kIdVpmovdb, //!< Instruction 'vpmovdb' {AVX512_F+VL}.
+ kIdVpmovdw, //!< Instruction 'vpmovdw' {AVX512_F+VL}.
+ kIdVpmovm2b, //!< Instruction 'vpmovm2b' {AVX512_BW+VL}.
+ kIdVpmovm2d, //!< Instruction 'vpmovm2d' {AVX512_DQ+VL}.
+ kIdVpmovm2q, //!< Instruction 'vpmovm2q' {AVX512_DQ+VL}.
+ kIdVpmovm2w, //!< Instruction 'vpmovm2w' {AVX512_BW+VL}.
+ kIdVpmovmskb, //!< Instruction 'vpmovmskb' {AVX|AVX2}.
+ kIdVpmovq2m, //!< Instruction 'vpmovq2m' {AVX512_DQ+VL}.
+ kIdVpmovqb, //!< Instruction 'vpmovqb' {AVX512_F+VL}.
+ kIdVpmovqd, //!< Instruction 'vpmovqd' {AVX512_F+VL}.
+ kIdVpmovqw, //!< Instruction 'vpmovqw' {AVX512_F+VL}.
+ kIdVpmovsdb, //!< Instruction 'vpmovsdb' {AVX512_F+VL}.
+ kIdVpmovsdw, //!< Instruction 'vpmovsdw' {AVX512_F+VL}.
+ kIdVpmovsqb, //!< Instruction 'vpmovsqb' {AVX512_F+VL}.
+ kIdVpmovsqd, //!< Instruction 'vpmovsqd' {AVX512_F+VL}.
+ kIdVpmovsqw, //!< Instruction 'vpmovsqw' {AVX512_F+VL}.
+ kIdVpmovswb, //!< Instruction 'vpmovswb' {AVX512_BW+VL}.
+ kIdVpmovsxbd, //!< Instruction 'vpmovsxbd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovsxbq, //!< Instruction 'vpmovsxbq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovsxbw, //!< Instruction 'vpmovsxbw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmovsxdq, //!< Instruction 'vpmovsxdq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovsxwd, //!< Instruction 'vpmovsxwd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovsxwq, //!< Instruction 'vpmovsxwq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovusdb, //!< Instruction 'vpmovusdb' {AVX512_F+VL}.
+ kIdVpmovusdw, //!< Instruction 'vpmovusdw' {AVX512_F+VL}.
+ kIdVpmovusqb, //!< Instruction 'vpmovusqb' {AVX512_F+VL}.
+ kIdVpmovusqd, //!< Instruction 'vpmovusqd' {AVX512_F+VL}.
+ kIdVpmovusqw, //!< Instruction 'vpmovusqw' {AVX512_F+VL}.
+ kIdVpmovuswb, //!< Instruction 'vpmovuswb' {AVX512_BW+VL}.
+ kIdVpmovw2m, //!< Instruction 'vpmovw2m' {AVX512_BW+VL}.
+ kIdVpmovwb, //!< Instruction 'vpmovwb' {AVX512_BW+VL}.
+ kIdVpmovzxbd, //!< Instruction 'vpmovzxbd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovzxbq, //!< Instruction 'vpmovzxbq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovzxbw, //!< Instruction 'vpmovzxbw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmovzxdq, //!< Instruction 'vpmovzxdq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovzxwd, //!< Instruction 'vpmovzxwd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmovzxwq, //!< Instruction 'vpmovzxwq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmuldq, //!< Instruction 'vpmuldq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmulhrsw, //!< Instruction 'vpmulhrsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmulhuw, //!< Instruction 'vpmulhuw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmulhw, //!< Instruction 'vpmulhw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmulld, //!< Instruction 'vpmulld' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpmullq, //!< Instruction 'vpmullq' {AVX512_DQ+VL}.
+ kIdVpmullw, //!< Instruction 'vpmullw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpmultishiftqb, //!< Instruction 'vpmultishiftqb' {AVX512_VBMI+VL}.
+ kIdVpmuludq, //!< Instruction 'vpmuludq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpopcntb, //!< Instruction 'vpopcntb' {AVX512_BITALG+VL}.
+ kIdVpopcntd, //!< Instruction 'vpopcntd' {AVX512_VPOPCNTDQ+VL}.
+ kIdVpopcntq, //!< Instruction 'vpopcntq' {AVX512_VPOPCNTDQ+VL}.
+ kIdVpopcntw, //!< Instruction 'vpopcntw' {AVX512_BITALG+VL}.
+ kIdVpor, //!< Instruction 'vpor' {AVX|AVX2}.
+ kIdVpord, //!< Instruction 'vpord' {AVX512_F+VL}.
+ kIdVporq, //!< Instruction 'vporq' {AVX512_F+VL}.
+ kIdVpperm, //!< Instruction 'vpperm' {XOP}.
+ kIdVprold, //!< Instruction 'vprold' {AVX512_F+VL}.
+ kIdVprolq, //!< Instruction 'vprolq' {AVX512_F+VL}.
+ kIdVprolvd, //!< Instruction 'vprolvd' {AVX512_F+VL}.
+ kIdVprolvq, //!< Instruction 'vprolvq' {AVX512_F+VL}.
+ kIdVprord, //!< Instruction 'vprord' {AVX512_F+VL}.
+ kIdVprorq, //!< Instruction 'vprorq' {AVX512_F+VL}.
+ kIdVprorvd, //!< Instruction 'vprorvd' {AVX512_F+VL}.
+ kIdVprorvq, //!< Instruction 'vprorvq' {AVX512_F+VL}.
+ kIdVprotb, //!< Instruction 'vprotb' {XOP}.
+ kIdVprotd, //!< Instruction 'vprotd' {XOP}.
+ kIdVprotq, //!< Instruction 'vprotq' {XOP}.
+ kIdVprotw, //!< Instruction 'vprotw' {XOP}.
+ kIdVpsadbw, //!< Instruction 'vpsadbw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpscatterdd, //!< Instruction 'vpscatterdd' {AVX512_F+VL}.
+ kIdVpscatterdq, //!< Instruction 'vpscatterdq' {AVX512_F+VL}.
+ kIdVpscatterqd, //!< Instruction 'vpscatterqd' {AVX512_F+VL}.
+ kIdVpscatterqq, //!< Instruction 'vpscatterqq' {AVX512_F+VL}.
+ kIdVpshab, //!< Instruction 'vpshab' {XOP}.
+ kIdVpshad, //!< Instruction 'vpshad' {XOP}.
+ kIdVpshaq, //!< Instruction 'vpshaq' {XOP}.
+ kIdVpshaw, //!< Instruction 'vpshaw' {XOP}.
+ kIdVpshlb, //!< Instruction 'vpshlb' {XOP}.
+ kIdVpshld, //!< Instruction 'vpshld' {XOP}.
+ kIdVpshldd, //!< Instruction 'vpshldd' {AVX512_VBMI2+VL}.
+ kIdVpshldq, //!< Instruction 'vpshldq' {AVX512_VBMI2+VL}.
+ kIdVpshldvd, //!< Instruction 'vpshldvd' {AVX512_VBMI2+VL}.
+ kIdVpshldvq, //!< Instruction 'vpshldvq' {AVX512_VBMI2+VL}.
+ kIdVpshldvw, //!< Instruction 'vpshldvw' {AVX512_VBMI2+VL}.
+ kIdVpshldw, //!< Instruction 'vpshldw' {AVX512_VBMI2+VL}.
+ kIdVpshlq, //!< Instruction 'vpshlq' {XOP}.
+ kIdVpshlw, //!< Instruction 'vpshlw' {XOP}.
+ kIdVpshrdd, //!< Instruction 'vpshrdd' {AVX512_VBMI2+VL}.
+ kIdVpshrdq, //!< Instruction 'vpshrdq' {AVX512_VBMI2+VL}.
+ kIdVpshrdvd, //!< Instruction 'vpshrdvd' {AVX512_VBMI2+VL}.
+ kIdVpshrdvq, //!< Instruction 'vpshrdvq' {AVX512_VBMI2+VL}.
+ kIdVpshrdvw, //!< Instruction 'vpshrdvw' {AVX512_VBMI2+VL}.
+ kIdVpshrdw, //!< Instruction 'vpshrdw' {AVX512_VBMI2+VL}.
+ kIdVpshufb, //!< Instruction 'vpshufb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpshufbitqmb, //!< Instruction 'vpshufbitqmb' {AVX512_BITALG+VL}.
+ kIdVpshufd, //!< Instruction 'vpshufd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpshufhw, //!< Instruction 'vpshufhw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpshuflw, //!< Instruction 'vpshuflw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsignb, //!< Instruction 'vpsignb' {AVX|AVX2}.
+ kIdVpsignd, //!< Instruction 'vpsignd' {AVX|AVX2}.
+ kIdVpsignw, //!< Instruction 'vpsignw' {AVX|AVX2}.
+ kIdVpslld, //!< Instruction 'vpslld' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpslldq, //!< Instruction 'vpslldq' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsllq, //!< Instruction 'vpsllq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsllvd, //!< Instruction 'vpsllvd' {AVX2|AVX512_F+VL}.
+ kIdVpsllvq, //!< Instruction 'vpsllvq' {AVX2|AVX512_F+VL}.
+ kIdVpsllvw, //!< Instruction 'vpsllvw' {AVX512_BW+VL}.
+ kIdVpsllw, //!< Instruction 'vpsllw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsrad, //!< Instruction 'vpsrad' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsraq, //!< Instruction 'vpsraq' {AVX512_F+VL}.
+ kIdVpsravd, //!< Instruction 'vpsravd' {AVX2|AVX512_F+VL}.
+ kIdVpsravq, //!< Instruction 'vpsravq' {AVX512_F+VL}.
+ kIdVpsravw, //!< Instruction 'vpsravw' {AVX512_BW+VL}.
+ kIdVpsraw, //!< Instruction 'vpsraw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsrld, //!< Instruction 'vpsrld' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsrldq, //!< Instruction 'vpsrldq' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsrlq, //!< Instruction 'vpsrlq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsrlvd, //!< Instruction 'vpsrlvd' {AVX2|AVX512_F+VL}.
+ kIdVpsrlvq, //!< Instruction 'vpsrlvq' {AVX2|AVX512_F+VL}.
+ kIdVpsrlvw, //!< Instruction 'vpsrlvw' {AVX512_BW+VL}.
+ kIdVpsrlw, //!< Instruction 'vpsrlw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubb, //!< Instruction 'vpsubb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubd, //!< Instruction 'vpsubd' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsubq, //!< Instruction 'vpsubq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpsubsb, //!< Instruction 'vpsubsb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubsw, //!< Instruction 'vpsubsw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubusb, //!< Instruction 'vpsubusb' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubusw, //!< Instruction 'vpsubusw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpsubw, //!< Instruction 'vpsubw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpternlogd, //!< Instruction 'vpternlogd' {AVX512_F+VL}.
+ kIdVpternlogq, //!< Instruction 'vpternlogq' {AVX512_F+VL}.
+ kIdVptest, //!< Instruction 'vptest' {AVX}.
+ kIdVptestmb, //!< Instruction 'vptestmb' {AVX512_BW+VL}.
+ kIdVptestmd, //!< Instruction 'vptestmd' {AVX512_F+VL}.
+ kIdVptestmq, //!< Instruction 'vptestmq' {AVX512_F+VL}.
+ kIdVptestmw, //!< Instruction 'vptestmw' {AVX512_BW+VL}.
+ kIdVptestnmb, //!< Instruction 'vptestnmb' {AVX512_BW+VL}.
+ kIdVptestnmd, //!< Instruction 'vptestnmd' {AVX512_F+VL}.
+ kIdVptestnmq, //!< Instruction 'vptestnmq' {AVX512_F+VL}.
+ kIdVptestnmw, //!< Instruction 'vptestnmw' {AVX512_BW+VL}.
+ kIdVpunpckhbw, //!< Instruction 'vpunpckhbw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpunpckhdq, //!< Instruction 'vpunpckhdq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpunpckhqdq, //!< Instruction 'vpunpckhqdq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpunpckhwd, //!< Instruction 'vpunpckhwd' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpunpcklbw, //!< Instruction 'vpunpcklbw' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpunpckldq, //!< Instruction 'vpunpckldq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpunpcklqdq, //!< Instruction 'vpunpcklqdq' {AVX|AVX2|AVX512_F+VL}.
+ kIdVpunpcklwd, //!< Instruction 'vpunpcklwd' {AVX|AVX2|AVX512_BW+VL}.
+ kIdVpxor, //!< Instruction 'vpxor' {AVX|AVX2}.
+ kIdVpxord, //!< Instruction 'vpxord' {AVX512_F+VL}.
+ kIdVpxorq, //!< Instruction 'vpxorq' {AVX512_F+VL}.
+ kIdVrangepd, //!< Instruction 'vrangepd' {AVX512_DQ+VL}.
+ kIdVrangeps, //!< Instruction 'vrangeps' {AVX512_DQ+VL}.
+ kIdVrangesd, //!< Instruction 'vrangesd' {AVX512_DQ}.
+ kIdVrangess, //!< Instruction 'vrangess' {AVX512_DQ}.
+ kIdVrcp14pd, //!< Instruction 'vrcp14pd' {AVX512_F+VL}.
+ kIdVrcp14ps, //!< Instruction 'vrcp14ps' {AVX512_F+VL}.
+ kIdVrcp14sd, //!< Instruction 'vrcp14sd' {AVX512_F}.
+ kIdVrcp14ss, //!< Instruction 'vrcp14ss' {AVX512_F}.
+ kIdVrcp28pd, //!< Instruction 'vrcp28pd' {AVX512_ERI}.
+ kIdVrcp28ps, //!< Instruction 'vrcp28ps' {AVX512_ERI}.
+ kIdVrcp28sd, //!< Instruction 'vrcp28sd' {AVX512_ERI}.
+ kIdVrcp28ss, //!< Instruction 'vrcp28ss' {AVX512_ERI}.
+ kIdVrcpps, //!< Instruction 'vrcpps' {AVX}.
+ kIdVrcpss, //!< Instruction 'vrcpss' {AVX}.
+ kIdVreducepd, //!< Instruction 'vreducepd' {AVX512_DQ+VL}.
+ kIdVreduceps, //!< Instruction 'vreduceps' {AVX512_DQ+VL}.
+ kIdVreducesd, //!< Instruction 'vreducesd' {AVX512_DQ}.
+ kIdVreducess, //!< Instruction 'vreducess' {AVX512_DQ}.
+ kIdVrndscalepd, //!< Instruction 'vrndscalepd' {AVX512_F+VL}.
+ kIdVrndscaleps, //!< Instruction 'vrndscaleps' {AVX512_F+VL}.
+ kIdVrndscalesd, //!< Instruction 'vrndscalesd' {AVX512_F}.
+ kIdVrndscaless, //!< Instruction 'vrndscaless' {AVX512_F}.
+ kIdVroundpd, //!< Instruction 'vroundpd' {AVX}.
+ kIdVroundps, //!< Instruction 'vroundps' {AVX}.
+ kIdVroundsd, //!< Instruction 'vroundsd' {AVX}.
+ kIdVroundss, //!< Instruction 'vroundss' {AVX}.
+ kIdVrsqrt14pd, //!< Instruction 'vrsqrt14pd' {AVX512_F+VL}.
+ kIdVrsqrt14ps, //!< Instruction 'vrsqrt14ps' {AVX512_F+VL}.
+ kIdVrsqrt14sd, //!< Instruction 'vrsqrt14sd' {AVX512_F}.
+ kIdVrsqrt14ss, //!< Instruction 'vrsqrt14ss' {AVX512_F}.
+ kIdVrsqrt28pd, //!< Instruction 'vrsqrt28pd' {AVX512_ERI}.
+ kIdVrsqrt28ps, //!< Instruction 'vrsqrt28ps' {AVX512_ERI}.
+ kIdVrsqrt28sd, //!< Instruction 'vrsqrt28sd' {AVX512_ERI}.
+ kIdVrsqrt28ss, //!< Instruction 'vrsqrt28ss' {AVX512_ERI}.
+ kIdVrsqrtps, //!< Instruction 'vrsqrtps' {AVX}.
+ kIdVrsqrtss, //!< Instruction 'vrsqrtss' {AVX}.
+ kIdVscalefpd, //!< Instruction 'vscalefpd' {AVX512_F+VL}.
+ kIdVscalefps, //!< Instruction 'vscalefps' {AVX512_F+VL}.
+ kIdVscalefsd, //!< Instruction 'vscalefsd' {AVX512_F}.
+ kIdVscalefss, //!< Instruction 'vscalefss' {AVX512_F}.
+ kIdVscatterdpd, //!< Instruction 'vscatterdpd' {AVX512_F+VL}.
+ kIdVscatterdps, //!< Instruction 'vscatterdps' {AVX512_F+VL}.
+ kIdVscatterpf0dpd, //!< Instruction 'vscatterpf0dpd' {AVX512_PFI}.
+ kIdVscatterpf0dps, //!< Instruction 'vscatterpf0dps' {AVX512_PFI}.
+ kIdVscatterpf0qpd, //!< Instruction 'vscatterpf0qpd' {AVX512_PFI}.
+ kIdVscatterpf0qps, //!< Instruction 'vscatterpf0qps' {AVX512_PFI}.
+ kIdVscatterpf1dpd, //!< Instruction 'vscatterpf1dpd' {AVX512_PFI}.
+ kIdVscatterpf1dps, //!< Instruction 'vscatterpf1dps' {AVX512_PFI}.
+ kIdVscatterpf1qpd, //!< Instruction 'vscatterpf1qpd' {AVX512_PFI}.
+ kIdVscatterpf1qps, //!< Instruction 'vscatterpf1qps' {AVX512_PFI}.
+ kIdVscatterqpd, //!< Instruction 'vscatterqpd' {AVX512_F+VL}.
+ kIdVscatterqps, //!< Instruction 'vscatterqps' {AVX512_F+VL}.
+ kIdVshuff32x4, //!< Instruction 'vshuff32x4' {AVX512_F+VL}.
+ kIdVshuff64x2, //!< Instruction 'vshuff64x2' {AVX512_F+VL}.
+ kIdVshufi32x4, //!< Instruction 'vshufi32x4' {AVX512_F+VL}.
+ kIdVshufi64x2, //!< Instruction 'vshufi64x2' {AVX512_F+VL}.
+ kIdVshufpd, //!< Instruction 'vshufpd' {AVX|AVX512_F+VL}.
+ kIdVshufps, //!< Instruction 'vshufps' {AVX|AVX512_F+VL}.
+ kIdVsqrtpd, //!< Instruction 'vsqrtpd' {AVX|AVX512_F+VL}.
+ kIdVsqrtps, //!< Instruction 'vsqrtps' {AVX|AVX512_F+VL}.
+ kIdVsqrtsd, //!< Instruction 'vsqrtsd' {AVX|AVX512_F}.
+ kIdVsqrtss, //!< Instruction 'vsqrtss' {AVX|AVX512_F}.
+ kIdVstmxcsr, //!< Instruction 'vstmxcsr' {AVX}.
+ kIdVsubpd, //!< Instruction 'vsubpd' {AVX|AVX512_F+VL}.
+ kIdVsubps, //!< Instruction 'vsubps' {AVX|AVX512_F+VL}.
+ kIdVsubsd, //!< Instruction 'vsubsd' {AVX|AVX512_F}.
+ kIdVsubss, //!< Instruction 'vsubss' {AVX|AVX512_F}.
+ kIdVtestpd, //!< Instruction 'vtestpd' {AVX}.
+ kIdVtestps, //!< Instruction 'vtestps' {AVX}.
+ kIdVucomisd, //!< Instruction 'vucomisd' {AVX|AVX512_F}.
+ kIdVucomiss, //!< Instruction 'vucomiss' {AVX|AVX512_F}.
+ kIdVunpckhpd, //!< Instruction 'vunpckhpd' {AVX|AVX512_F+VL}.
+ kIdVunpckhps, //!< Instruction 'vunpckhps' {AVX|AVX512_F+VL}.
+ kIdVunpcklpd, //!< Instruction 'vunpcklpd' {AVX|AVX512_F+VL}.
+ kIdVunpcklps, //!< Instruction 'vunpcklps' {AVX|AVX512_F+VL}.
+ kIdVxorpd, //!< Instruction 'vxorpd' {AVX|AVX512_DQ+VL}.
+ kIdVxorps, //!< Instruction 'vxorps' {AVX|AVX512_DQ+VL}.
+ kIdVzeroall, //!< Instruction 'vzeroall' {AVX}.
+ kIdVzeroupper, //!< Instruction 'vzeroupper' {AVX}.
+ kIdWbinvd, //!< Instruction 'wbinvd'.
+ kIdWbnoinvd, //!< Instruction 'wbnoinvd' {WBNOINVD}.
+ kIdWrfsbase, //!< Instruction 'wrfsbase' {FSGSBASE} (X64).
+ kIdWrgsbase, //!< Instruction 'wrgsbase' {FSGSBASE} (X64).
+ kIdWrmsr, //!< Instruction 'wrmsr' {MSR}.
+ kIdXabort, //!< Instruction 'xabort' {RTM}.
+ kIdXadd, //!< Instruction 'xadd' {I486}.
+ kIdXbegin, //!< Instruction 'xbegin' {RTM}.
+ kIdXchg, //!< Instruction 'xchg'.
+ kIdXend, //!< Instruction 'xend' {RTM}.
+ kIdXgetbv, //!< Instruction 'xgetbv' {XSAVE}.
+ kIdXlatb, //!< Instruction 'xlatb'.
+ kIdXor, //!< Instruction 'xor'.
+ kIdXorpd, //!< Instruction 'xorpd' {SSE2}.
+ kIdXorps, //!< Instruction 'xorps' {SSE}.
+ kIdXrstor, //!< Instruction 'xrstor' {XSAVE}.
+ kIdXrstor64, //!< Instruction 'xrstor64' {XSAVE} (X64).
+ kIdXrstors, //!< Instruction 'xrstors' {XSAVES}.
+ kIdXrstors64, //!< Instruction 'xrstors64' {XSAVES} (X64).
+ kIdXsave, //!< Instruction 'xsave' {XSAVE}.
+ kIdXsave64, //!< Instruction 'xsave64' {XSAVE} (X64).
+ kIdXsavec, //!< Instruction 'xsavec' {XSAVEC}.
+ kIdXsavec64, //!< Instruction 'xsavec64' {XSAVEC} (X64).
+ kIdXsaveopt, //!< Instruction 'xsaveopt' {XSAVEOPT}.
+ kIdXsaveopt64, //!< Instruction 'xsaveopt64' {XSAVEOPT} (X64).
+ kIdXsaves, //!< Instruction 'xsaves' {XSAVES}.
+ kIdXsaves64, //!< Instruction 'xsaves64' {XSAVES} (X64).
+ kIdXsetbv, //!< Instruction 'xsetbv' {XSAVE}.
+ kIdXtest, //!< Instruction 'xtest' {TSX}.
+ _kIdCount
+ // ${InstId:End}
+ };
+
+ //! Instruction options.
+ enum Options : uint32_t {
+ kOptionVex3 = 0x00000400u, //!< Use 3-byte VEX prefix if possible (AVX) (must be 0x00000400).
+ kOptionModMR = 0x00000800u, //!< Use ModMR instead of ModRM when it's available.
+ kOptionEvex = 0x00001000u, //!< Use 4-byte EVEX prefix if possible (AVX-512) (must be 0x00001000).
+
+ kOptionLock = 0x00002000u, //!< LOCK prefix (lock-enabled instructions only).
+ kOptionRep = 0x00004000u, //!< REP prefix (string instructions only).
+ kOptionRepne = 0x00008000u, //!< REPNE prefix (string instructions only).
+
+    kOptionXAcquire = 0x00010000u, //!< XACQUIRE prefix (only on instructions that support it).
+    kOptionXRelease = 0x00020000u, //!< XRELEASE prefix (only on instructions that support it).
+
+ kOptionER = 0x00040000u, //!< AVX-512: embedded-rounding {er} and implicit {sae}.
+ kOptionSAE = 0x00080000u, //!< AVX-512: suppress-all-exceptions {sae}.
+ kOptionRN_SAE = 0x00000000u, //!< AVX-512: round-to-nearest (even) {rn-sae} (bits 00).
+ kOptionRD_SAE = 0x00200000u, //!< AVX-512: round-down (toward -inf) {rd-sae} (bits 01).
+ kOptionRU_SAE = 0x00400000u, //!< AVX-512: round-up (toward +inf) {ru-sae} (bits 10).
+ kOptionRZ_SAE = 0x00600000u, //!< AVX-512: round-toward-zero (truncate) {rz-sae} (bits 11).
+ kOptionZMask = 0x00800000u, //!< AVX-512: Use zeroing {k}{z} instead of merging {k}.
+ _kOptionAvx512Mask = 0x00FC0000u, //!< AVX-512: Mask of all possible AVX-512 options except EVEX prefix flag.
+
+ kOptionOpCodeB = 0x01000000u, //!< REX.B and/or VEX.B field (X64).
+ kOptionOpCodeX = 0x02000000u, //!< REX.X and/or VEX.X field (X64).
+ kOptionOpCodeR = 0x04000000u, //!< REX.R and/or VEX.R field (X64).
+ kOptionOpCodeW = 0x08000000u, //!< REX.W and/or VEX.W field (X64).
+ kOptionRex = 0x40000000u, //!< Force REX prefix (X64).
+ _kOptionInvalidRex = 0x80000000u //!< Invalid REX prefix (set by X86 or when AH|BH|CH|DH regs are used on X64).
+ };
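+
+  // Illustrative note (not upstream documentation): AVX-512 embedded rounding
+  // is selected by combining `kOptionER` with one of the rounding-mode options,
+  // e.g. `kOptionER | kOptionRZ_SAE` requests {rz-sae} (round toward zero with
+  // suppressed exceptions), while `kOptionZMask` switches write-masking from
+  // merging {k} to zeroing {k}{z}.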
+
+ // --------------------------------------------------------------------------
+ // [Statics]
+ // --------------------------------------------------------------------------
+
+  //! Tests whether the `instId` is a defined instruction id (this also counts Inst::kIdNone, which must be zero).
+ static inline bool isDefinedId(uint32_t instId) noexcept { return instId < _kIdCount; }
+};
+
+// ============================================================================
+// [asmjit::x86::Condition]
+// ============================================================================
+
+namespace Condition {
+ //! Condition code.
+ enum Code : uint32_t {
+ kO = 0x00u, //!< OF==1
+ kNO = 0x01u, //!< OF==0
+ kB = 0x02u, //!< CF==1 (unsigned < )
+ kC = 0x02u, //!< CF==1
+ kNAE = 0x02u, //!< CF==1 (unsigned < )
+ kAE = 0x03u, //!< CF==0 (unsigned >=)
+ kNB = 0x03u, //!< CF==0 (unsigned >=)
+ kNC = 0x03u, //!< CF==0
+ kE = 0x04u, //!< ZF==1 (any_sign ==)
+ kZ = 0x04u, //!< ZF==1 (any_sign ==)
+ kNE = 0x05u, //!< ZF==0 (any_sign !=)
+ kNZ = 0x05u, //!< ZF==0 (any_sign !=)
+ kBE = 0x06u, //!< CF==1 | ZF==1 (unsigned <=)
+ kNA = 0x06u, //!< CF==1 | ZF==1 (unsigned <=)
+ kA = 0x07u, //!< CF==0 & ZF==0 (unsigned > )
+ kNBE = 0x07u, //!< CF==0 & ZF==0 (unsigned > )
+ kS = 0x08u, //!< SF==1 (is negative)
+ kNS = 0x09u, //!< SF==0 (is positive or zero)
+ kP = 0x0Au, //!< PF==1
+ kPE = 0x0Au, //!< PF==1
+ kPO = 0x0Bu, //!< PF==0
+ kNP = 0x0Bu, //!< PF==0
+ kL = 0x0Cu, //!< SF!=OF (signed < )
+ kNGE = 0x0Cu, //!< SF!=OF (signed < )
+ kGE = 0x0Du, //!< SF==OF (signed >=)
+ kNL = 0x0Du, //!< SF==OF (signed >=)
+ kLE = 0x0Eu, //!< ZF==1 | SF!=OF (signed <=)
+ kNG = 0x0Eu, //!< ZF==1 | SF!=OF (signed <=)
+ kG = 0x0Fu, //!< ZF==0 & SF==OF (signed > )
+ kNLE = 0x0Fu, //!< ZF==0 & SF==OF (signed > )
+ kCount = 0x10u,
+
+ kSign = kS, //!< Sign.
+ kNotSign = kNS, //!< Not Sign.
+
+ kOverflow = kO, //!< Signed overflow.
+ kNotOverflow = kNO, //!< Not signed overflow.
+
+ kEqual = kE, //!< Equal `a == b`.
+ kNotEqual = kNE, //!< Not Equal `a != b`.
+
+ kSignedLT = kL, //!< Signed `a < b`.
+ kSignedLE = kLE, //!< Signed `a <= b`.
+ kSignedGT = kG, //!< Signed `a > b`.
+ kSignedGE = kGE, //!< Signed `a >= b`.
+
+ kUnsignedLT = kB, //!< Unsigned `a < b`.
+ kUnsignedLE = kBE, //!< Unsigned `a <= b`.
+ kUnsignedGT = kA, //!< Unsigned `a > b`.
+ kUnsignedGE = kAE, //!< Unsigned `a >= b`.
+
+ kZero = kZ,
+ kNotZero = kNZ,
+
+ kNegative = kS,
+ kPositive = kNS,
+
+ kParityEven = kP,
+ kParityOdd = kPO
+ };
+
+ static constexpr uint8_t reverseTable[kCount] = {
+ kO, kNO, kA , kBE, // O|NO|B |AE
+ kE, kNE, kAE, kB , // E|NE|BE|A
+ kS, kNS, kPE, kPO, // S|NS|PE|PO
+ kG, kLE, kGE, kL // L|GE|LE|G
+ };
+
+ #define ASMJIT_INST_FROM_COND(ID) \
+ ID##o, ID##no, ID##b , ID##ae, \
+ ID##e, ID##ne, ID##be, ID##a , \
+ ID##s, ID##ns, ID##pe, ID##po, \
+ ID##l, ID##ge, ID##le, ID##g
+ static constexpr uint16_t jccTable[] = { ASMJIT_INST_FROM_COND(Inst::kIdJ) };
+ static constexpr uint16_t setccTable[] = { ASMJIT_INST_FROM_COND(Inst::kIdSet) };
+ static constexpr uint16_t cmovccTable[] = { ASMJIT_INST_FROM_COND(Inst::kIdCmov) };
+ #undef ASMJIT_INST_FROM_COND
+
+ //! Reverse a condition code (reverses the corresponding operands of a comparison).
+ static constexpr uint32_t reverse(uint32_t cond) noexcept { return reverseTable[cond]; }
+ //! Negate a condition code.
+ static constexpr uint32_t negate(uint32_t cond) noexcept { return cond ^ 1u; }
+
+ //! Translate a condition code `cond` to a `jcc` instruction id.
+ static constexpr uint32_t toJcc(uint32_t cond) noexcept { return jccTable[cond]; }
+ //! Translate a condition code `cond` to a `setcc` instruction id.
+ static constexpr uint32_t toSetcc(uint32_t cond) noexcept { return setccTable[cond]; }
+ //! Translate a condition code `cond` to a `cmovcc` instruction id.
+ static constexpr uint32_t toCmovcc(uint32_t cond) noexcept { return cmovccTable[cond]; }
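+
+  // Illustrative sketch (comments only, not part of the upstream API): how the
+  // helpers above compose. Results follow from the tables defined above.
+  //
+  //   uint32_t c = Condition::kSignedLT;        // kL (0x0C): signed `a < b`.
+  //   Condition::negate(c);                     // kGE: signed `a >= b`.
+  //   Condition::reverse(c);                    // kG : `b > a` (operands swapped).
+  //   Condition::toJcc(c);                      // Inst::kIdJl.
+  //   Condition::toSetcc(Condition::negate(c)); // Inst::kIdSetge.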
+}
+
+// ============================================================================
+// [asmjit::x86::FpuWord]
+// ============================================================================
+
+//! FPU control and status word.
+namespace FpuWord {
+ //! FPU status word.
+ enum Status : uint32_t {
+ kStatusInvalid = 0x0001u,
+ kStatusDenormalized = 0x0002u,
+ kStatusDivByZero = 0x0004u,
+ kStatusOverflow = 0x0008u,
+ kStatusUnderflow = 0x0010u,
+ kStatusPrecision = 0x0020u,
+ kStatusStackFault = 0x0040u,
+ kStatusInterrupt = 0x0080u,
+ kStatusC0 = 0x0100u,
+ kStatusC1 = 0x0200u,
+ kStatusC2 = 0x0400u,
+ kStatusTop = 0x3800u,
+ kStatusC3 = 0x4000u,
+ kStatusBusy = 0x8000u
+ };
+
+ //! FPU control word.
+ enum Control : uint32_t {
+ // Bits 0-5.
+ kControlEM_Mask = 0x003Fu,
+ kControlEM_Invalid = 0x0001u,
+ kControlEM_Denormal = 0x0002u,
+ kControlEM_DivByZero = 0x0004u,
+ kControlEM_Overflow = 0x0008u,
+ kControlEM_Underflow = 0x0010u,
+ kControlEM_Inexact = 0x0020u,
+
+ // Bits 8-9.
+ kControlPC_Mask = 0x0300u,
+ kControlPC_Float = 0x0000u,
+ kControlPC_Reserved = 0x0100u,
+ kControlPC_Double = 0x0200u,
+ kControlPC_Extended = 0x0300u,
+
+ // Bits 10-11.
+ kControlRC_Mask = 0x0C00u,
+ kControlRC_Nearest = 0x0000u,
+ kControlRC_Down = 0x0400u,
+ kControlRC_Up = 0x0800u,
+ kControlRC_Truncate = 0x0C00u,
+
+ // Bit 12.
+ kControlIC_Mask = 0x1000u,
+ kControlIC_Projective = 0x0000u,
+ kControlIC_Affine = 0x1000u
+ };
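+
+  // Illustrative note (derived from the bit values above): a control word that
+  // masks all exceptions and selects extended precision with round-to-nearest
+  // can be composed as
+  // `kControlEM_Mask | kControlPC_Extended | kControlRC_Nearest` (== 0x033Fu).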
+}
+
+// ============================================================================
+// [asmjit::x86::Status]
+// ============================================================================
+
+//! CPU and FPU status flags.
+namespace Status {
+  //! CPU and FPU status flags used by `InstRWInfo`.
+ enum Flags : uint32_t {
+ // ------------------------------------------------------------------------
+ // [Architecture Neutral Flags - 0x000000FF]
+ // ------------------------------------------------------------------------
+
+ kCF = 0x00000001u, //!< Carry flag.
+ kOF = 0x00000002u, //!< Signed overflow flag.
+ kSF = 0x00000004u, //!< Sign flag (negative/sign, if set).
+ kZF = 0x00000008u, //!< Zero and/or equality flag (1 if zero/equal).
+
+ // ------------------------------------------------------------------------
+ // [Architecture Specific Flags - 0xFFFFFF00]
+ // ------------------------------------------------------------------------
+
+ kAF = 0x00000100u, //!< Adjust flag.
+ kPF = 0x00000200u, //!< Parity flag.
+ kDF = 0x00000400u, //!< Direction flag.
+ kIF = 0x00000800u, //!< Interrupt enable flag.
+
+ kAC = 0x00001000u, //!< Alignment check.
+
+ kC0 = 0x00010000u, //!< FPU C0 status flag.
+ kC1 = 0x00020000u, //!< FPU C1 status flag.
+ kC2 = 0x00040000u, //!< FPU C2 status flag.
+ kC3 = 0x00080000u //!< FPU C3 status flag.
+ };
+}
+
+// ============================================================================
+// [asmjit::x86::Predicate]
+// ============================================================================
+
+//! Contains predicates used by SIMD instructions.
+namespace Predicate {
+ //! A predicate used by CMP[PD|PS|SD|SS] instructions.
+ enum Cmp : uint32_t {
+ kCmpEQ = 0x00u, //!< Equal (Quiet).
+ kCmpLT = 0x01u, //!< Less (Signaling).
+ kCmpLE = 0x02u, //!< Less/Equal (Signaling).
+ kCmpUNORD = 0x03u, //!< Unordered (Quiet).
+ kCmpNEQ = 0x04u, //!< Not Equal (Quiet).
+ kCmpNLT = 0x05u, //!< Not Less (Signaling).
+ kCmpNLE = 0x06u, //!< Not Less/Equal (Signaling).
+ kCmpORD = 0x07u //!< Ordered (Quiet).
+ };
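+
+  // Illustrative (hypothetical) usage with an x86 assembler `a`:
+  //
+  //   a.cmpps(xmm0, xmm1, Imm(Predicate::kCmpLT)); // xmm0 = per-element
+  //                                                // (xmm0 < xmm1) mask.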
+
+ //! A predicate used by [V]PCMP[I|E]STR[I|M] instructions.
+ enum PCmpStr : uint32_t {
+ // Source data format:
+ kPCmpStrUB = 0x00u << 0, //!< The source data format is unsigned bytes.
+ kPCmpStrUW = 0x01u << 0, //!< The source data format is unsigned words.
+ kPCmpStrSB = 0x02u << 0, //!< The source data format is signed bytes.
+ kPCmpStrSW = 0x03u << 0, //!< The source data format is signed words.
+
+ // Aggregation operation:
+    kPCmpStrEqualAny = 0x00u << 2, //!< The arithmetic comparison is "equal"; each element of the
+                                   //!< second source is tested against all elements of the first
+                                   //!< (character-set match).
+    kPCmpStrRanges = 0x01u << 2, //!< The arithmetic comparison is "greater than or equal"
+                                 //!< between even indexed elements and "less than or equal"
+                                 //!< between odd indexed elements (range match).
+    kPCmpStrEqualEach = 0x02u << 2, //!< The arithmetic comparison is "equal", element-wise
+                                    //!< (string comparison).
+    kPCmpStrEqualOrdered = 0x03u << 2, //!< The arithmetic comparison is "equal", in order
+                                       //!< (substring search).
+
+ // Polarity:
+ kPCmpStrPosPolarity = 0x00u << 4, //!< IntRes2 = IntRes1.
+ kPCmpStrNegPolarity = 0x01u << 4, //!< IntRes2 = -1 XOR IntRes1.
+ kPCmpStrPosMasked = 0x02u << 4, //!< IntRes2 = IntRes1.
+ kPCmpStrNegMasked = 0x03u << 4, //!< IntRes2[i] = second[i] == invalid ? IntRes1[i] : ~IntRes1[i].
+
+ // Output selection (pcmpstri):
+ kPCmpStrOutputLSI = 0x00u << 6, //!< The index returned to ECX is of the least significant set bit in IntRes2.
+ kPCmpStrOutputMSI = 0x01u << 6, //!< The index returned to ECX is of the most significant set bit in IntRes2.
+
+ // Output selection (pcmpstrm):
+ kPCmpStrBitMask = 0x00u << 6, //!< IntRes2 is returned as the mask to the least significant bits of XMM0.
+ kPCmpStrIndexMask = 0x01u << 6 //!< IntRes2 is expanded into a byte/word mask and placed in XMM0.
+ };
+
+ //! A predicate used by ROUND[PD|PS|SD|SS] instructions.
+ enum Round : uint32_t {
+ kRoundNearest = 0x00u, //!< Round to nearest (even).
+    kRoundDown = 0x01u, //!< Round down toward -INF (floor).
+    kRoundUp = 0x02u, //!< Round up toward +INF (ceil).
+    kRoundTrunc = 0x03u, //!< Round toward zero (truncate).
+    kRoundCurrent = 0x04u, //!< Round using the current rounding mode (ignores other RC bits).
+    kRoundInexact = 0x08u //!< Suppresses the inexact (precision) exception, if set.
+ };
+
+ //! A predicate used by VCMP[PD|PS|SD|SS] instructions.
+ //!
+ //! The first 8 values are compatible with `Cmp`.
+ enum VCmp : uint32_t {
+ kVCmpEQ_OQ = kCmpEQ, //!< Equal (Quiet , Ordered).
+ kVCmpLT_OS = kCmpLT, //!< Less (Signaling, Ordered).
+ kVCmpLE_OS = kCmpLE, //!< Less/Equal (Signaling, Ordered).
+ kVCmpUNORD_Q = kCmpUNORD, //!< Unordered (Quiet).
+ kVCmpNEQ_UQ = kCmpNEQ, //!< Not Equal (Quiet , Unordered).
+ kVCmpNLT_US = kCmpNLT, //!< Not Less (Signaling, Unordered).
+ kVCmpNLE_US = kCmpNLE, //!< Not Less/Equal (Signaling, Unordered).
+ kVCmpORD_Q = kCmpORD, //!< Ordered (Quiet).
+ kVCmpEQ_UQ = 0x08u, //!< Equal (Quiet , Unordered).
+ kVCmpNGE_US = 0x09u, //!< Not Greater/Equal (Signaling, Unordered).
+ kVCmpNGT_US = 0x0Au, //!< Not Greater (Signaling, Unordered).
+ kVCmpFALSE_OQ = 0x0Bu, //!< False (Quiet , Ordered).
+ kVCmpNEQ_OQ = 0x0Cu, //!< Not Equal (Quiet , Ordered).
+ kVCmpGE_OS = 0x0Du, //!< Greater/Equal (Signaling, Ordered).
+ kVCmpGT_OS = 0x0Eu, //!< Greater (Signaling, Ordered).
+ kVCmpTRUE_UQ = 0x0Fu, //!< True (Quiet , Unordered).
+ kVCmpEQ_OS = 0x10u, //!< Equal (Signaling, Ordered).
+ kVCmpLT_OQ = 0x11u, //!< Less (Quiet , Ordered).
+ kVCmpLE_OQ = 0x12u, //!< Less/Equal (Quiet , Ordered).
+ kVCmpUNORD_S = 0x13u, //!< Unordered (Signaling).
+ kVCmpNEQ_US = 0x14u, //!< Not Equal (Signaling, Unordered).
+ kVCmpNLT_UQ = 0x15u, //!< Not Less (Quiet , Unordered).
+ kVCmpNLE_UQ = 0x16u, //!< Not Less/Equal (Quiet , Unordered).
+ kVCmpORD_S = 0x17u, //!< Ordered (Signaling).
+ kVCmpEQ_US = 0x18u, //!< Equal (Signaling, Unordered).
+ kVCmpNGE_UQ = 0x19u, //!< Not Greater/Equal (Quiet , Unordered).
+ kVCmpNGT_UQ = 0x1Au, //!< Not Greater (Quiet , Unordered).
+ kVCmpFALSE_OS = 0x1Bu, //!< False (Signaling, Ordered).
+ kVCmpNEQ_OS = 0x1Cu, //!< Not Equal (Signaling, Ordered).
+ kVCmpGE_OQ = 0x1Du, //!< Greater/Equal (Quiet , Ordered).
+ kVCmpGT_OQ = 0x1Eu, //!< Greater (Quiet , Ordered).
+ kVCmpTRUE_US = 0x1Fu //!< True (Signaling, Unordered).
+ };
+
+ //! A predicate used by VFIXUPIMM[PD|PS|SD|SS] instructions (AVX-512).
+ enum VFixupImm : uint32_t {
+ kVFixupImmZEOnZero = 0x01u,
+ kVFixupImmIEOnZero = 0x02u,
+ kVFixupImmZEOnOne = 0x04u,
+ kVFixupImmIEOnOne = 0x08u,
+ kVFixupImmIEOnSNaN = 0x10u,
+ kVFixupImmIEOnNInf = 0x20u,
+    kVFixupImmIEOnNegative = 0x40u,
+ kVFixupImmIEOnPInf = 0x80u
+ };
+
+ //! A predicate used by VFPCLASS[PD|PS|SD|SS] instructions (AVX-512).
+ //!
+ //! \note Values can be combined together to form the final 8-bit mask.
+ enum VFPClass : uint32_t {
+ kVFPClassQNaN = 0x01u, //!< Checks for QNaN.
+ kVFPClassPZero = 0x02u, //!< Checks for +0.
+ kVFPClassNZero = 0x04u, //!< Checks for -0.
+ kVFPClassPInf = 0x08u, //!< Checks for +Inf.
+ kVFPClassNInf = 0x10u, //!< Checks for -Inf.
+ kVFPClassDenormal = 0x20u, //!< Checks for denormal.
+ kVFPClassNegative = 0x40u, //!< Checks for negative finite value.
+ kVFPClassSNaN = 0x80u //!< Checks for SNaN.
+ };
+
+ //! A predicate used by VGETMANT[PD|PS|SD|SS] instructions (AVX-512).
+ enum VGetMant : uint32_t {
+ kVGetMant1To2 = 0x00u,
+ kVGetMant1Div2To2 = 0x01u,
+ kVGetMant1Div2To1 = 0x02u,
+ kVGetMant3Div4To3Div2 = 0x03u,
+ kVGetMantNoSign = 0x04u,
+ kVGetMantQNaNIfSign = 0x08u
+ };
+
+ //! A predicate used by VPCMP[U][B|W|D|Q] instructions (AVX-512).
+ enum VPCmp : uint32_t {
+ kVPCmpEQ = 0x00u, //!< Equal.
+ kVPCmpLT = 0x01u, //!< Less.
+ kVPCmpLE = 0x02u, //!< Less/Equal.
+ kVPCmpFALSE = 0x03u, //!< False.
+ kVPCmpNE = 0x04u, //!< Not Equal.
+ kVPCmpGE = 0x05u, //!< Greater/Equal.
+ kVPCmpGT = 0x06u, //!< Greater.
+ kVPCmpTRUE = 0x07u //!< True.
+ };
+
+ //! A predicate used by VPCOM[U][B|W|D|Q] instructions (XOP).
+ enum VPCom : uint32_t {
+ kVPComLT = 0x00u, //!< Less.
+    kVPComLE = 0x01u, //!< Less/Equal.
+ kVPComGT = 0x02u, //!< Greater.
+ kVPComGE = 0x03u, //!< Greater/Equal.
+ kVPComEQ = 0x04u, //!< Equal.
+ kVPComNE = 0x05u, //!< Not Equal.
+ kVPComFALSE = 0x06u, //!< False.
+ kVPComTRUE = 0x07u //!< True.
+ };
+
+ //! A predicate used by VRANGE[PD|PS|SD|SS] instructions (AVX-512).
+ enum VRange : uint32_t {
+ kVRangeSelectMin = 0x00u, //!< Select minimum value.
+ kVRangeSelectMax = 0x01u, //!< Select maximum value.
+ kVRangeSelectAbsMin = 0x02u, //!< Select minimum absolute value.
+ kVRangeSelectAbsMax = 0x03u, //!< Select maximum absolute value.
+ kVRangeSignSrc1 = 0x00u, //!< Select sign of SRC1.
+ kVRangeSignSrc2 = 0x04u, //!< Select sign of SRC2.
+ kVRangeSign0 = 0x08u, //!< Set sign to 0.
+ kVRangeSign1 = 0x0Cu //!< Set sign to 1.
+ };
+
+ //! A predicate used by VREDUCE[PD|PS|SD|SS] instructions (AVX-512).
+ enum VReduce : uint32_t {
+    kVReduceRoundCurrent = 0x00u, //!< Round using the current rounding mode.
+ kVReduceRoundEven = 0x04u, //!< Round to nearest even.
+ kVReduceRoundDown = 0x05u, //!< Round down.
+ kVReduceRoundUp = 0x06u, //!< Round up.
+ kVReduceRoundTrunc = 0x07u, //!< Truncate.
+ kVReduceSuppress = 0x08u //!< Suppress exceptions.
+ };
+
+ //! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (2 values).
+ //!
+ //! \param a Position of the first component [0, 1].
+ //! \param b Position of the second component [0, 1].
+ //!
+ //! Shuffle constants can be used to encode an immediate for these instructions:
+ //! - `shufpd|vshufpd`
+ static constexpr uint32_t shuf(uint32_t a, uint32_t b) noexcept {
+ return (a << 1) | b;
+ }
+
+ //! Pack a shuffle constant to be used by SSE/AVX/AVX-512 instructions (4 values).
+ //!
+ //! \param a Position of the first component [0, 3].
+ //! \param b Position of the second component [0, 3].
+ //! \param c Position of the third component [0, 3].
+ //! \param d Position of the fourth component [0, 3].
+ //!
+ //! Shuffle constants can be used to encode an immediate for these instructions:
+ //! - `pshufw`
+ //! - `pshuflw|vpshuflw`
+ //! - `pshufhw|vpshufhw`
+ //! - `pshufd|vpshufd`
+ //! - `shufps|vshufps`
+ static constexpr uint32_t shuf(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
+ return (a << 6) | (b << 4) | (c << 2) | d;
+ }
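+
+  // Illustrative sketch (comments only): packing shuffle immediates with the
+  // helpers above; `a` lands in the highest bit pair and `d` in bits [1:0].
+  //
+  //   Predicate::shuf(3, 2, 1, 0); // == 0xE4u - identity for `pshufd|vpshufd`.
+  //   Predicate::shuf(0, 1, 2, 3); // == 0x1Bu - reverses all four elements.
+  //   Predicate::shuf(0, 1);       // == 0x01u - swaps both elements of
+  //                                // `shufpd|vshufpd` (same source register).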
+}
+
+// ============================================================================
+// [asmjit::x86::TLog]
+// ============================================================================
+
+//! Bitwise ternary logic between 3 operands introduced by AVX-512.
+namespace TLog {
+ //! A predicate that can be used to create a common predicate for VPTERNLOG[D|Q].
+ enum Operator : uint32_t {
+ k0 = 0x00u, //!< 0 value.
+ k1 = 0xFFu, //!< 1 value.
+ kA = 0xF0u, //!< A value.
+ kB = 0xCCu, //!< B value.
+ kC = 0xAAu, //!< C value.
+ kNotA = kA ^ k1, //!< `!A` expression.
+ kNotB = kB ^ k1, //!< `!B` expression.
+ kNotC = kC ^ k1, //!< `!C` expression.
+
+ kAB = kA & kB, //!< `A & B` expression.
+ kAC = kA & kC, //!< `A & C` expression.
+ kBC = kB & kC, //!< `B & C` expression.
+ kNotAB = kAB ^ k1, //!< `!(A & B)` expression.
+ kNotAC = kAC ^ k1, //!< `!(A & C)` expression.
+ kNotBC = kBC ^ k1, //!< `!(B & C)` expression.
+
+ kABC = kAB & kC, //!< `A & B & C` expression.
+ kNotABC = kABC ^ k1 //!< `!(A & B & C)` expression.
+ };
+
+ //! Creates an immediate that can be used by VPTERNLOG[D|Q] instructions.
+ static constexpr uint32_t make(uint32_t b000, uint32_t b001, uint32_t b010, uint32_t b011, uint32_t b100, uint32_t b101, uint32_t b110, uint32_t b111) noexcept {
+ return (b000 << 0) | (b001 << 1) | (b010 << 2) | (b011 << 3) | (b100 << 4) | (b101 << 5) | (b110 << 6) | (b111 << 7);
+ }
+
+  //! Truncates `x` to the 8-bit immediate that can be used by VPTERNLOG[D|Q] instructions.
+  static constexpr uint32_t value(uint32_t x) noexcept { return x & 0xFF; }
+  //! Negates an immediate that can be used by VPTERNLOG[D|Q] instructions.
+  static constexpr uint32_t negate(uint32_t x) noexcept { return x ^ 0xFF; }
+  //! Creates an if/else (bitwise select) expression that can be used by VPTERNLOG[D|Q] instructions.
+ static constexpr uint32_t ifElse(uint32_t condition, uint32_t a, uint32_t b) noexcept { return (condition & a) | (negate(condition) & b); }
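+
+  // Illustrative sketch (comments only): composing VPTERNLOG[D|Q] immediates
+  // from the operators above; values follow from the kA/kB/kC truth tables.
+  //
+  //   TLog::kA | TLog::kB | TLog::kC;             // == 0xFEu: A | B | C.
+  //   TLog::kA ^ TLog::kB ^ TLog::kC;             // == 0x96u: A ^ B ^ C (parity).
+  //   TLog::ifElse(TLog::kA, TLog::kB, TLog::kC); // == 0xCAu: A ? B : C
+  //                                               // (bitwise select).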
+}
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86GLOBALS_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp b/3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp
new file mode 100644
index 00000000000..6c5e28be46a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86instapi.cpp
@@ -0,0 +1,1543 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+// ----------------------------------------------------------------------------
+// IMPORTANT: AsmJit now uses an external instruction database to populate
+// static tables within this file. Perform the following steps to regenerate
+// all tables enclosed by ${...}:
+//
+// 1. Install node.js environment <https://nodejs.org>
+// 2. Go to asmjit/tools directory
+// 3. Get the latest asmdb from <https://github.com/asmjit/asmdb> and
+// copy/link the `asmdb` directory to `asmjit/tools/asmdb`.
+// 4. Execute `node tablegen-x86.js`
+//
+// Instruction encoding and opcodes were added to the `x86inst.cpp` database
+// manually in the past and they are not updated by the script as it became
+// tricky. However, everything else is updated including instruction operands
+// and tables required to validate them, instruction read/write information
+// (including registers and flags), and all indexes to all tables.
+// ----------------------------------------------------------------------------
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/cpuinfo.h"
+#include "../core/misc_p.h"
+#include "../core/support.h"
+#include "../x86/x86features.h"
+#include "../x86/x86instapi_p.h"
+#include "../x86/x86instdb_p.h"
+#include "../x86/x86opcode_p.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::InstInternal - Text]
+// ============================================================================
+
+#ifndef ASMJIT_NO_TEXT
+Error InstInternal::instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept {
+ DebugUtils::unused(archId);
+
+ if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId)))
+ return DebugUtils::errored(kErrorInvalidInstruction);
+
+ const InstDB::InstInfo& info = InstDB::infoById(instId);
+ return output.appendString(InstDB::_nameData + info._nameDataIndex);
+}
+
+uint32_t InstInternal::stringToInstId(uint32_t archId, const char* s, size_t len) noexcept {
+ DebugUtils::unused(archId);
+
+ if (ASMJIT_UNLIKELY(!s))
+ return Inst::kIdNone;
+
+ if (len == SIZE_MAX)
+ len = strlen(s);
+
+ if (ASMJIT_UNLIKELY(len == 0 || len > InstDB::kMaxNameSize))
+ return Inst::kIdNone;
+
+ uint32_t prefix = uint32_t(s[0]) - 'a';
+ if (ASMJIT_UNLIKELY(prefix > 'z' - 'a'))
+ return Inst::kIdNone;
+
+ uint32_t index = InstDB::instNameIndex[prefix].start;
+ if (ASMJIT_UNLIKELY(!index))
+ return Inst::kIdNone;
+
+ const char* nameData = InstDB::_nameData;
+ const InstDB::InstInfo* table = InstDB::_instInfoTable;
+
+ const InstDB::InstInfo* base = table + index;
+ const InstDB::InstInfo* end = table + InstDB::instNameIndex[prefix].end;
+
+ for (size_t lim = (size_t)(end - base); lim != 0; lim >>= 1) {
+ const InstDB::InstInfo* cur = base + (lim >> 1);
+ int result = Support::cmpInstName(nameData + cur[0]._nameDataIndex, s, len);
+
+ if (result < 0) {
+ base = cur + 1;
+ lim--;
+ continue;
+ }
+
+ if (result > 0)
+ continue;
+
+ return uint32_t((size_t)(cur - table));
+ }
+
+ return Inst::kIdNone;
+}
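+
+// A minimal lookup sketch (illustrative; `ArchInfo::kIdX64` assumed from
+// core/arch.h). Names are bucketed by their first letter via `instNameIndex`
+// and then binary-searched within the bucket:
+//
+//   uint32_t id = InstInternal::stringToInstId(ArchInfo::kIdX64, "addpd", SIZE_MAX);
+//   // `id` is Inst::kIdAddpd when the name is known, Inst::kIdNone otherwise.
+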
+#endif // !ASMJIT_NO_TEXT
+
+// ============================================================================
+// [asmjit::x86::InstInternal - Validate]
+// ============================================================================
+
+#ifndef ASMJIT_NO_VALIDATION
+struct X86ValidationData {
+ //! Allowed registers by reg-type (x86::Reg::kType...).
+ uint32_t allowedRegMask[Reg::kTypeMax + 1];
+ uint32_t allowedMemBaseRegs;
+ uint32_t allowedMemIndexRegs;
+};
+
+#define VALUE(X) \
+ (X == Reg::kTypeGpbLo) ? InstDB::kOpGpbLo : \
+ (X == Reg::kTypeGpbHi) ? InstDB::kOpGpbHi : \
+ (X == Reg::kTypeGpw ) ? InstDB::kOpGpw : \
+ (X == Reg::kTypeGpd ) ? InstDB::kOpGpd : \
+ (X == Reg::kTypeGpq ) ? InstDB::kOpGpq : \
+ (X == Reg::kTypeXmm ) ? InstDB::kOpXmm : \
+ (X == Reg::kTypeYmm ) ? InstDB::kOpYmm : \
+ (X == Reg::kTypeZmm ) ? InstDB::kOpZmm : \
+ (X == Reg::kTypeMm ) ? InstDB::kOpMm : \
+ (X == Reg::kTypeKReg ) ? InstDB::kOpKReg : \
+ (X == Reg::kTypeSReg ) ? InstDB::kOpSReg : \
+ (X == Reg::kTypeCReg ) ? InstDB::kOpCReg : \
+ (X == Reg::kTypeDReg ) ? InstDB::kOpDReg : \
+ (X == Reg::kTypeSt ) ? InstDB::kOpSt : \
+ (X == Reg::kTypeBnd ) ? InstDB::kOpBnd : \
+ (X == Reg::kTypeRip ) ? InstDB::kOpNone : InstDB::kOpNone
+static const uint32_t _x86OpFlagFromRegType[Reg::kTypeMax + 1] = { ASMJIT_LOOKUP_TABLE_32(VALUE, 0) };
+#undef VALUE
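+
+// `ASMJIT_LOOKUP_TABLE_32(VALUE, 0)` (declared in core/misc_p.h) expands to
+// `VALUE(0), VALUE(1), ..., VALUE(31)`, turning the ternary chain above into
+// a compile-time lookup table indexed by register type.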
+
+#define REG_MASK_FROM_REG_TYPE_X86(X) \
+ (X == Reg::kTypeGpbLo) ? 0x0000000Fu : \
+ (X == Reg::kTypeGpbHi) ? 0x0000000Fu : \
+ (X == Reg::kTypeGpw ) ? 0x000000FFu : \
+ (X == Reg::kTypeGpd ) ? 0x000000FFu : \
+ (X == Reg::kTypeGpq ) ? 0x000000FFu : \
+ (X == Reg::kTypeXmm ) ? 0x000000FFu : \
+ (X == Reg::kTypeYmm ) ? 0x000000FFu : \
+ (X == Reg::kTypeZmm ) ? 0x000000FFu : \
+ (X == Reg::kTypeMm ) ? 0x000000FFu : \
+ (X == Reg::kTypeKReg ) ? 0x000000FFu : \
+ (X == Reg::kTypeSReg ) ? 0x0000007Eu : \
+ (X == Reg::kTypeCReg ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeDReg ) ? 0x000000FFu : \
+ (X == Reg::kTypeSt ) ? 0x000000FFu : \
+ (X == Reg::kTypeBnd ) ? 0x0000000Fu : \
+ (X == Reg::kTypeRip ) ? 0x00000001u : 0u
+
+#define REG_MASK_FROM_REG_TYPE_X64(X) \
+ (X == Reg::kTypeGpbLo) ? 0x0000FFFFu : \
+ (X == Reg::kTypeGpbHi) ? 0x0000000Fu : \
+ (X == Reg::kTypeGpw ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeGpd ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeGpq ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeXmm ) ? 0xFFFFFFFFu : \
+ (X == Reg::kTypeYmm ) ? 0xFFFFFFFFu : \
+ (X == Reg::kTypeZmm ) ? 0xFFFFFFFFu : \
+ (X == Reg::kTypeMm ) ? 0x000000FFu : \
+ (X == Reg::kTypeKReg ) ? 0x000000FFu : \
+ (X == Reg::kTypeSReg ) ? 0x0000007Eu : \
+ (X == Reg::kTypeCReg ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeDReg ) ? 0x0000FFFFu : \
+ (X == Reg::kTypeSt ) ? 0x000000FFu : \
+ (X == Reg::kTypeBnd ) ? 0x0000000Fu : \
+ (X == Reg::kTypeRip ) ? 0x00000001u : 0u
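+
+// Each entry of the tables built from these macros is a bitmask of physical
+// register indexes that are valid for the given register type. For example,
+// 32-bit mode exposes 8 GP registers (0x000000FFu) while 64-bit mode exposes
+// 16 (0x0000FFFFu), and the SReg mask 0x0000007Eu excludes index 0, which
+// means 'no segment'.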
+
+static const X86ValidationData _x86ValidationData = {
+ { ASMJIT_LOOKUP_TABLE_32(REG_MASK_FROM_REG_TYPE_X86, 0) },
+ (1u << Reg::kTypeGpw) | (1u << Reg::kTypeGpd) | (1u << Reg::kTypeRip) | (1u << Label::kLabelTag),
+ (1u << Reg::kTypeGpw) | (1u << Reg::kTypeGpd) | (1u << Reg::kTypeXmm) | (1u << Reg::kTypeYmm) | (1u << Reg::kTypeZmm)
+};
+
+static const X86ValidationData _x64ValidationData = {
+ { ASMJIT_LOOKUP_TABLE_32(REG_MASK_FROM_REG_TYPE_X64, 0) },
+ (1u << Reg::kTypeGpd) | (1u << Reg::kTypeGpq) | (1u << Reg::kTypeRip) | (1u << Label::kLabelTag),
+ (1u << Reg::kTypeGpd) | (1u << Reg::kTypeGpq) | (1u << Reg::kTypeXmm) | (1u << Reg::kTypeYmm) | (1u << Reg::kTypeZmm)
+};
+
+#undef REG_MASK_FROM_REG_TYPE_X64
+#undef REG_MASK_FROM_REG_TYPE_X86
+
+static ASMJIT_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept {
+ return Reg::isZmm(op) || (op.isMem() && op.size() == 64);
+}
+
+static ASMJIT_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept {
+ // Fail if operand types are incompatible.
+ uint32_t opFlags = op.opFlags;
+ if ((opFlags & ref.opFlags) == 0) {
+    // Temporarily mark `immOutOfRange` so we can return a more descriptive error later.
+ if ((opFlags & InstDB::kOpAllImm) && (ref.opFlags & InstDB::kOpAllImm)) {
+ immOutOfRange = true;
+ return true;
+ }
+
+ return false;
+ }
+
+ // Fail if memory specific flags and sizes do not match the signature.
+ uint32_t opMemFlags = op.memFlags;
+ if (opMemFlags != 0) {
+ uint32_t refMemFlags = ref.memFlags;
+ if ((refMemFlags & opMemFlags) == 0)
+ return false;
+
+ if ((refMemFlags & InstDB::kMemOpBaseOnly) && !(opMemFlags & InstDB::kMemOpBaseOnly))
+ return false;
+ }
+
+ // Specific register index.
+ if (opFlags & InstDB::kOpAllRegs) {
+ uint32_t refRegMask = ref.regMask;
+ if (refRegMask && !(op.regMask & refRegMask))
+ return false;
+ }
+
+ return true;
+}
+
+ASMJIT_FAVOR_SIZE Error InstInternal::validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept {
+ // Only called when `archId` matches X86 family.
+ ASMJIT_ASSERT(ArchInfo::isX86Family(archId));
+
+ const X86ValidationData* vd;
+ if (archId == ArchInfo::kIdX86)
+ vd = &_x86ValidationData;
+ else
+ vd = &_x64ValidationData;
+
+ uint32_t i;
+ uint32_t mode = InstDB::modeFromArchId(archId);
+
+ // Get the instruction data.
+ uint32_t instId = inst.id();
+ uint32_t options = inst.options();
+
+ if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId)))
+ return DebugUtils::errored(kErrorInvalidInstruction);
+
+ const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
+ const InstDB::CommonInfo& commonInfo = instInfo.commonInfo();
+
+ uint32_t iFlags = instInfo.flags();
+
+ // --------------------------------------------------------------------------
+ // [Validate LOCK|XACQUIRE|XRELEASE]
+ // --------------------------------------------------------------------------
+
+ const uint32_t kLockXAcqRel = Inst::kOptionXAcquire | Inst::kOptionXRelease;
+ if (options & (Inst::kOptionLock | kLockXAcqRel)) {
+ if (options & Inst::kOptionLock) {
+ if (ASMJIT_UNLIKELY(!(iFlags & InstDB::kFlagLock) && !(options & kLockXAcqRel)))
+ return DebugUtils::errored(kErrorInvalidLockPrefix);
+
+ if (ASMJIT_UNLIKELY(opCount < 1 || !operands[0].isMem()))
+ return DebugUtils::errored(kErrorInvalidLockPrefix);
+ }
+
+ if (options & kLockXAcqRel) {
+ if (ASMJIT_UNLIKELY(!(options & Inst::kOptionLock) || (options & kLockXAcqRel) == kLockXAcqRel))
+ return DebugUtils::errored(kErrorInvalidPrefixCombination);
+
+ if (ASMJIT_UNLIKELY((options & Inst::kOptionXAcquire) && !(iFlags & InstDB::kFlagXAcquire)))
+ return DebugUtils::errored(kErrorInvalidXAcquirePrefix);
+
+ if (ASMJIT_UNLIKELY((options & Inst::kOptionXRelease) && !(iFlags & InstDB::kFlagXRelease)))
+ return DebugUtils::errored(kErrorInvalidXReleasePrefix);
+ }
+ }
+
+  // --------------------------------------------------------------------------
+  // [Validate REP and REPNE Prefixes]
+  // --------------------------------------------------------------------------
+
+ const uint32_t kRepAny = Inst::kOptionRep | Inst::kOptionRepne;
+ if (options & kRepAny) {
+ if (ASMJIT_UNLIKELY((options & kRepAny) == kRepAny))
+ return DebugUtils::errored(kErrorInvalidPrefixCombination);
+
+ if (ASMJIT_UNLIKELY(!(iFlags & InstDB::kFlagRep)))
+ return DebugUtils::errored(kErrorInvalidRepPrefix);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Translate Each Operand to the Corresponding OpSignature]
+ // --------------------------------------------------------------------------
+
+ InstDB::OpSignature oSigTranslated[Globals::kMaxOpCount];
+ uint32_t combinedOpFlags = 0;
+ uint32_t combinedRegMask = 0;
+ const Mem* memOp = nullptr;
+
+ for (i = 0; i < opCount; i++) {
+ const Operand_& op = operands[i];
+ if (op.opType() == Operand::kOpNone)
+ break;
+
+ uint32_t opFlags = 0;
+ uint32_t memFlags = 0;
+ uint32_t regMask = 0;
+
+ switch (op.opType()) {
+ case Operand::kOpReg: {
+ uint32_t regType = op.as<BaseReg>().type();
+ if (ASMJIT_UNLIKELY(regType >= Reg::kTypeCount))
+ return DebugUtils::errored(kErrorInvalidRegType);
+
+ opFlags = _x86OpFlagFromRegType[regType];
+ if (ASMJIT_UNLIKELY(opFlags == 0))
+ return DebugUtils::errored(kErrorInvalidRegType);
+
+ // If `regId` is equal or greater than Operand::kVirtIdMin it means
+ // that the register is virtual and its index will be assigned later
+ // by the register allocator. We must pass unless asked to disallow
+ // virtual registers.
+ // TODO: We need an option to refuse virtual regs here.
+ uint32_t regId = op.id();
+ if (regId < Operand::kVirtIdMin) {
+ if (ASMJIT_UNLIKELY(regId >= 32))
+ return DebugUtils::errored(kErrorInvalidPhysId);
+
+ if (ASMJIT_UNLIKELY(Support::bitTest(vd->allowedRegMask[regType], regId) == 0))
+ return DebugUtils::errored(kErrorInvalidPhysId);
+
+ regMask = Support::bitMask(regId);
+ combinedRegMask |= regMask;
+ }
+ else {
+ regMask = 0xFFFFFFFFu;
+ }
+ break;
+ }
+
+ // TODO: Validate base and index and combine these with `combinedRegMask`.
+ case Operand::kOpMem: {
+ const Mem& m = op.as<Mem>();
+ memOp = &m;
+
+ uint32_t memSize = m.size();
+ uint32_t baseType = m.baseType();
+ uint32_t indexType = m.indexType();
+
+ if (m.segmentId() > 6)
+ return DebugUtils::errored(kErrorInvalidSegment);
+
+ // Validate AVX-512 broadcast {1tox}.
+ if (m.hasBroadcast()) {
+ if (memSize != 0) {
+ // If the size is specified it has to match the broadcast size.
+ if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B32() && memSize != 4))
+ return DebugUtils::errored(kErrorInvalidBroadcast);
+
+ if (ASMJIT_UNLIKELY(commonInfo.hasAvx512B64() && memSize != 8))
+ return DebugUtils::errored(kErrorInvalidBroadcast);
+ }
+ else {
+ // If there is no size we implicitly calculate it so we can validate N in {1toN} properly.
+ memSize = commonInfo.hasAvx512B32() ? 4 : 8;
+ }
+
+ memSize <<= m.getBroadcast();
+ }
+
+ if (baseType) {
+ uint32_t baseId = m.baseId();
+
+ if (m.isRegHome()) {
+            // Home address of a virtual register. In such a case we don't want
+            // to validate the type of the base register as it will always be
+            // patched to ESP|RSP.
+ }
+ else {
+ if (ASMJIT_UNLIKELY((vd->allowedMemBaseRegs & (1u << baseType)) == 0))
+ return DebugUtils::errored(kErrorInvalidAddress);
+ }
+
+          // Create information that will be validated only if this is an implicit
+          // memory operand. Basically this is only usable for string instructions
+          // and other instructions where the memory operand is implicit and has
+          // the 'seg:[reg]' form.
+ if (baseId < Operand::kVirtIdMin) {
+ // Physical base id.
+ regMask = Support::bitMask(baseId);
+ combinedRegMask |= regMask;
+ }
+ else {
+ // Virtual base id - fill the whole mask for implicit mem validation.
+ // The register is not assigned yet, so we cannot predict the phys id.
+ regMask = 0xFFFFFFFFu;
+ }
+
+ if (!indexType && !m.offsetLo32())
+ memFlags |= InstDB::kMemOpBaseOnly;
+ }
+ else {
+ // Base is a 64-bit address.
+ int64_t offset = m.offset();
+ if (!Support::isInt32(offset)) {
+ if (mode == InstDB::kModeX86) {
+ // 32-bit mode: Make sure that the address is either `int32_t` or `uint32_t`.
+ if (!Support::isUInt32(offset))
+ return DebugUtils::errored(kErrorInvalidAddress64Bit);
+ }
+ else {
+            // 64-bit mode: Zero extension is allowed if the address has a 32-bit
+            // index register or no index register at all (it's still encodable).
+ if (indexType) {
+ if (!Support::isUInt32(offset))
+ return DebugUtils::errored(kErrorInvalidAddress64Bit);
+
+ if (indexType != Reg::kTypeGpd)
+ return DebugUtils::errored(kErrorInvalidAddress64BitZeroExtension);
+ }
+ else {
+ // We don't validate absolute 64-bit addresses without an index register
+ // as this also depends on the target's base address. We don't have the
+ // information to do it at this moment.
+ }
+ }
+ }
+ }
+
+ if (indexType) {
+ if (ASMJIT_UNLIKELY((vd->allowedMemIndexRegs & (1u << indexType)) == 0))
+ return DebugUtils::errored(kErrorInvalidAddress);
+
+ if (indexType == Reg::kTypeXmm) {
+ opFlags |= InstDB::kOpVm;
+ memFlags |= InstDB::kMemOpVm32x | InstDB::kMemOpVm64x;
+ }
+ else if (indexType == Reg::kTypeYmm) {
+ opFlags |= InstDB::kOpVm;
+ memFlags |= InstDB::kMemOpVm32y | InstDB::kMemOpVm64y;
+ }
+ else if (indexType == Reg::kTypeZmm) {
+ opFlags |= InstDB::kOpVm;
+ memFlags |= InstDB::kMemOpVm32z | InstDB::kMemOpVm64z;
+ }
+ else {
+ opFlags |= InstDB::kOpMem;
+ if (baseType)
+ memFlags |= InstDB::kMemOpMib;
+ }
+
+ // [RIP + {XMM|YMM|ZMM}] is not allowed.
+ if (baseType == Reg::kTypeRip && (opFlags & InstDB::kOpVm))
+ return DebugUtils::errored(kErrorInvalidAddress);
+
+ uint32_t indexId = m.indexId();
+ if (indexId < Operand::kVirtIdMin)
+ combinedRegMask |= Support::bitMask(indexId);
+
+ // Only used for implicit memory operands having 'seg:[reg]' form, so clear it.
+ regMask = 0;
+ }
+ else {
+ opFlags |= InstDB::kOpMem;
+ }
+
+ switch (memSize) {
+ case 0: memFlags |= InstDB::kMemOpAny ; break;
+ case 1: memFlags |= InstDB::kMemOpM8 ; break;
+ case 2: memFlags |= InstDB::kMemOpM16 ; break;
+ case 4: memFlags |= InstDB::kMemOpM32 ; break;
+ case 6: memFlags |= InstDB::kMemOpM48 ; break;
+ case 8: memFlags |= InstDB::kMemOpM64 ; break;
+ case 10: memFlags |= InstDB::kMemOpM80 ; break;
+ case 16: memFlags |= InstDB::kMemOpM128; break;
+ case 32: memFlags |= InstDB::kMemOpM256; break;
+ case 64: memFlags |= InstDB::kMemOpM512; break;
+ default:
+ return DebugUtils::errored(kErrorInvalidOperandSize);
+ }
+
+ break;
+ }
+
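+      // Immediates are classified by every signed/unsigned width able to hold
+      // them. For example, 255 gets kOpU8 (plus all wider flags that fit) but
+      // not kOpI8, while -1 gets kOpI4|kOpI8 and all wider signed flags.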
+ case Operand::kOpImm: {
+ uint64_t immValue = op.as<Imm>().u64();
+ uint32_t immFlags = 0;
+
+ if (int64_t(immValue) >= 0) {
+ if (immValue <= 0x7u)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpI16 | InstDB::kOpU16 | InstDB::kOpI8 | InstDB::kOpU8 |
+ InstDB::kOpI4 | InstDB::kOpU4 ;
+ else if (immValue <= 0xFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpI16 | InstDB::kOpU16 | InstDB::kOpI8 | InstDB::kOpU8 |
+ InstDB::kOpU4 ;
+ else if (immValue <= 0x7Fu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpI16 | InstDB::kOpU16 | InstDB::kOpI8 | InstDB::kOpU8 ;
+ else if (immValue <= 0xFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpI16 | InstDB::kOpU16 | InstDB::kOpU8 ;
+ else if (immValue <= 0x7FFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpI16 | InstDB::kOpU16 ;
+ else if (immValue <= 0xFFFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32 |
+ InstDB::kOpU16 ;
+ else if (immValue <= 0x7FFFFFFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpI32 | InstDB::kOpU32;
+ else if (immValue <= 0xFFFFFFFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64 | InstDB::kOpU32;
+ else if (immValue <= 0x7FFFFFFFFFFFFFFFu)
+ immFlags = InstDB::kOpI64 | InstDB::kOpU64;
+ else
+ immFlags = InstDB::kOpU64;
+ }
+ else {
+ immValue = Support::neg(immValue);
+ if (immValue <= 0x8u)
+ immFlags = InstDB::kOpI64 | InstDB::kOpI32 | InstDB::kOpI16 | InstDB::kOpI8 | InstDB::kOpI4;
+ else if (immValue <= 0x80u)
+ immFlags = InstDB::kOpI64 | InstDB::kOpI32 | InstDB::kOpI16 | InstDB::kOpI8;
+ else if (immValue <= 0x8000u)
+ immFlags = InstDB::kOpI64 | InstDB::kOpI32 | InstDB::kOpI16;
+ else if (immValue <= 0x80000000u)
+ immFlags = InstDB::kOpI64 | InstDB::kOpI32;
+ else
+ immFlags = InstDB::kOpI64;
+ }
+ opFlags |= immFlags;
+ break;
+ }
+
+ case Operand::kOpLabel: {
+ opFlags |= InstDB::kOpRel8 | InstDB::kOpRel32;
+ break;
+ }
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ InstDB::OpSignature& oSigDst = oSigTranslated[i];
+ oSigDst.opFlags = opFlags;
+ oSigDst.memFlags = uint16_t(memFlags);
+ oSigDst.regMask = uint8_t(regMask & 0xFFu);
+ combinedOpFlags |= opFlags;
+ }
+
+  // Decrease the operand count so trailing 'none' operands are not counted.
+  // This is important as the Assembler and Compiler may simply pass more
+  // operands padded with none (which means that no operand is given at that
+  // index). However, validate that there are no gaps (like [reg, none, reg]
+  // or [none, reg]).
+ if (i < opCount) {
+ while (--opCount > i)
+ if (ASMJIT_UNLIKELY(!operands[opCount].isNone()))
+ return DebugUtils::errored(kErrorInvalidInstruction);
+ }
+
+ // Validate X86 and X64 specific cases.
+ if (mode == InstDB::kModeX86) {
+ // Illegal use of 64-bit register in 32-bit mode.
+ if (ASMJIT_UNLIKELY((combinedOpFlags & InstDB::kOpGpq) != 0))
+ return DebugUtils::errored(kErrorInvalidUseOfGpq);
+ }
+ else {
+ // Illegal use of a high 8-bit register with REX prefix.
+ if (ASMJIT_UNLIKELY((combinedOpFlags & InstDB::kOpGpbHi) != 0 && (combinedRegMask & 0xFFFFFF00u) != 0))
+ return DebugUtils::errored(kErrorInvalidUseOfGpbHi);
+ }
+
+ // --------------------------------------------------------------------------
+ // [Validate Instruction Signature by Comparing Against All `iSig` Rows]
+ // --------------------------------------------------------------------------
+
+ const InstDB::InstSignature* iSig = InstDB::_instSignatureTable + commonInfo._iSignatureIndex;
+ const InstDB::InstSignature* iEnd = iSig + commonInfo._iSignatureCount;
+
+ if (iSig != iEnd) {
+ const InstDB::OpSignature* opSignatureTable = InstDB::_opSignatureTable;
+
+    // If set, it means we matched a signature where only the immediate value
+    // was out of bounds. We can return a more descriptive error if we know this.
+ bool globalImmOutOfRange = false;
+
+ do {
+ // Check if the architecture is compatible.
+ if ((iSig->modes & mode) == 0)
+ continue;
+
+ // Compare the operands table with reference operands.
+ uint32_t j = 0;
+ uint32_t iSigCount = iSig->opCount;
+ bool localImmOutOfRange = false;
+
+ if (iSigCount == opCount) {
+ for (j = 0; j < opCount; j++)
+ if (!x86CheckOSig(oSigTranslated[j], opSignatureTable[iSig->operands[j]], localImmOutOfRange))
+ break;
+ }
+ else if (iSigCount - iSig->implicit == opCount) {
+ uint32_t r = 0;
+ for (j = 0; j < opCount && r < iSigCount; j++, r++) {
+ const InstDB::OpSignature* oChk = oSigTranslated + j;
+ const InstDB::OpSignature* oRef;
+Next:
+ oRef = opSignatureTable + iSig->operands[r];
+ // Skip implicit.
+ if ((oRef->opFlags & InstDB::kOpImplicit) != 0) {
+ if (++r >= iSigCount)
+ break;
+ else
+ goto Next;
+ }
+
+ if (!x86CheckOSig(*oChk, *oRef, localImmOutOfRange))
+ break;
+ }
+ }
+
+ if (j == opCount) {
+ if (!localImmOutOfRange) {
+ // Match, must clear possible `globalImmOutOfRange`.
+ globalImmOutOfRange = false;
+ break;
+ }
+ globalImmOutOfRange = localImmOutOfRange;
+ }
+ } while (++iSig != iEnd);
+
+ if (iSig == iEnd) {
+ if (globalImmOutOfRange)
+ return DebugUtils::errored(kErrorInvalidImmediate);
+ else
+ return DebugUtils::errored(kErrorInvalidInstruction);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [Validate AVX512 Options]
+ // --------------------------------------------------------------------------
+
+ const RegOnly& extraReg = inst.extraReg();
+ const uint32_t kAvx512Options = Inst::kOptionZMask |
+ Inst::kOptionER |
+ Inst::kOptionSAE ;
+
+ if (options & kAvx512Options) {
+ if (commonInfo.hasFlag(InstDB::kFlagEvex)) {
+ // Validate AVX-512 {z}.
+ if ((options & Inst::kOptionZMask)) {
+ if (ASMJIT_UNLIKELY((options & Inst::kOptionZMask) != 0 && !commonInfo.hasAvx512Z()))
+ return DebugUtils::errored(kErrorInvalidKZeroUse);
+ }
+
+ // Validate AVX-512 {sae} and {er}.
+ if (options & (Inst::kOptionSAE | Inst::kOptionER)) {
+ // Rounding control is impossible if the instruction is not reg-to-reg.
+ if (ASMJIT_UNLIKELY(memOp))
+ return DebugUtils::errored(kErrorInvalidEROrSAE);
+
+ // Check if {sae} or {er} is supported by the instruction.
+ if (options & Inst::kOptionER) {
+ // NOTE: if both {sae} and {er} are set, we don't care, as {sae} is implied.
+ if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512ER()))
+ return DebugUtils::errored(kErrorInvalidEROrSAE);
+ }
+ else {
+ if (ASMJIT_UNLIKELY(!commonInfo.hasAvx512SAE()))
+ return DebugUtils::errored(kErrorInvalidEROrSAE);
+ }
+
+        // {sae} and {er} are defined for either scalar ops or vector ops that
+        // require LL to be 10 (512-bit vector operations). We don't need any
+        // more bits in the instruction database to be able to validate this, as
+        // each AVX512 instruction that has broadcast is a vector instruction (in
+        // which case we require ZMM registers), otherwise it's a scalar
+        // instruction, which is valid.
+ if (commonInfo.hasAvx512B()) {
+ // Supports broadcast, thus we require LL to be '10', which means there
+ // have to be ZMM registers used. We don't calculate LL here, but we know
+ // that it would be '10' if there is at least one ZMM register used.
+
+ // There is no {er}/{sae}-enabled instruction with less than two operands.
+ ASMJIT_ASSERT(opCount >= 2);
+ if (ASMJIT_UNLIKELY(!x86IsZmmOrM512(operands[0]) && !x86IsZmmOrM512(operands[1])))
+ return DebugUtils::errored(kErrorInvalidEROrSAE);
+ }
+ }
+ }
+ else {
+      // Not an AVX512 instruction - maybe the extra operand is the xCX register
+      // used by a REP/REPNE prefix. Otherwise the instruction is invalid.
+ if ((options & kAvx512Options) || (options & kRepAny) == 0)
+ return DebugUtils::errored(kErrorInvalidInstruction);
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // [Validate {Extra} Register]
+ // --------------------------------------------------------------------------
+
+ if (extraReg.isReg()) {
+ if (options & kRepAny) {
+ // Validate REP|REPNE {cx|ecx|rcx}.
+ if (ASMJIT_UNLIKELY(iFlags & InstDB::kFlagRepIgnored))
+ return DebugUtils::errored(kErrorInvalidExtraReg);
+
+ if (extraReg.isPhysReg()) {
+ if (ASMJIT_UNLIKELY(extraReg.id() != Gp::kIdCx))
+ return DebugUtils::errored(kErrorInvalidExtraReg);
+ }
+
+      // The type of the {...} register must match the type of the base register
+      // of the memory operand. So if the memory operand uses a 32-bit register,
+      // the count register must also be 32-bit, etc...
+ if (ASMJIT_UNLIKELY(!memOp || extraReg.type() != memOp->baseType()))
+ return DebugUtils::errored(kErrorInvalidExtraReg);
+ }
+ else if (commonInfo.hasFlag(InstDB::kFlagEvex)) {
+ // Validate AVX-512 {k}.
+ if (ASMJIT_UNLIKELY(extraReg.type() != Reg::kTypeKReg))
+ return DebugUtils::errored(kErrorInvalidExtraReg);
+
+ if (ASMJIT_UNLIKELY(extraReg.id() == 0 || !commonInfo.hasAvx512K()))
+ return DebugUtils::errored(kErrorInvalidKMaskUse);
+ }
+ else {
+ return DebugUtils::errored(kErrorInvalidExtraReg);
+ }
+ }
+
+ return kErrorOk;
+}
+#endif // !ASMJIT_NO_VALIDATION
+
+// ============================================================================
+// [asmjit::x86::InstInternal - QueryRWInfo]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+static const uint64_t rwRegGroupByteMask[Reg::kGroupCount] = {
+ 0x00000000000000FFu, // GP.
+ 0xFFFFFFFFFFFFFFFFu, // XMM|YMM|ZMM.
+ 0x00000000000000FFu, // MM.
+ 0x00000000000000FFu, // KReg.
+ 0x0000000000000003u, // SReg.
+ 0x00000000000000FFu, // CReg.
+ 0x00000000000000FFu, // DReg.
+ 0x00000000000003FFu, // St().
+ 0x000000000000FFFFu, // BND.
+ 0x00000000000000FFu // RIP.
+};
+
+// TODO: Make universal.
+static ASMJIT_INLINE uint32_t gpRegSizeByArchId(uint32_t archId) noexcept {
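+  // Indexed by archId. Assuming the ArchInfo ordering kIdNone=0, kIdX86=1,
+  // kIdX64=2, kIdA32=3, kIdA64=4, the native GP register size is 4 bytes on
+  // 32-bit and 8 bytes on 64-bit targets.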
+ static const uint8_t table[] = { 0, 4, 8, 4, 8 };
+ return table[archId];
+}
+
+static ASMJIT_INLINE void rwZeroExtendGp(OpRWInfo& opRwInfo, const Gp& reg, uint32_t nativeGpSize) noexcept {
+ ASMJIT_ASSERT(BaseReg::isGp(reg.as<Operand>()));
+ if (reg.size() + 4 == nativeGpSize) {
+ opRwInfo.addOpFlags(OpRWInfo::kZExt);
+ opRwInfo.setExtendByteMask(~opRwInfo.writeByteMask() & 0xFFu);
+ }
+}
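+
+// For example, a 32-bit GP write on a 64-bit target (`mov eax, 1`) zero
+// extends bits 32..63 of RAX: the write byte mask is 0x0F, so the extend
+// byte mask becomes 0xF0 and the kZExt flag is set.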
+
+static ASMJIT_INLINE void rwZeroExtendAvxVec(OpRWInfo& opRwInfo, const Vec& reg) noexcept {
+ DebugUtils::unused(reg);
+
+ uint64_t msk = ~Support::fillTrailingBits(opRwInfo.writeByteMask());
+ if (msk) {
+ opRwInfo.addOpFlags(OpRWInfo::kZExt);
+ opRwInfo.setExtendByteMask(msk);
+ }
+}
+
+static ASMJIT_INLINE void rwZeroExtendNonVec(OpRWInfo& opRwInfo, const Reg& reg) noexcept {
+ uint64_t msk = ~Support::fillTrailingBits(opRwInfo.writeByteMask()) & rwRegGroupByteMask[reg.group()];
+ if (msk) {
+ opRwInfo.addOpFlags(OpRWInfo::kZExt);
+ opRwInfo.setExtendByteMask(msk);
+ }
+}
+
+Error InstInternal::queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept {
+ using namespace Status;
+
+ // Only called when `archId` matches X86 family.
+ ASMJIT_ASSERT(ArchInfo::isX86Family(archId));
+
+ // Get the instruction data.
+ uint32_t instId = inst.id();
+ if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId)))
+ return DebugUtils::errored(kErrorInvalidInstruction);
+
+ // Read/Write flags.
+ const InstDB::CommonInfoTableB& tabB = InstDB::_commonInfoTableB[InstDB::_instInfoTable[instId]._commonInfoIndexB];
+ const InstDB::RWFlagsInfoTable& rwFlags = InstDB::_rwFlagsInfoTable[tabB._rwFlagsIndex];
+
+ // Each RWInfo contains two indexes
+ // [0] - OpCount == 2
+ // [1] - OpCount != 2
+  // They are used this way because some instructions have 2-operand and
+  // 3-operand overloads with different semantics. So instead of adding
+  // more special cases we just separated their data tables.
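+  // For example, a 2-operand 'imul' maps to rwInfoIndex[id * 2 + 0], while
+  // the 3-operand overload maps to rwInfoIndex[id * 2 + 1].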
+ const InstDB::RWInfo& instRwInfo = InstDB::rwInfo[InstDB::rwInfoIndex[instId * 2u + uint32_t(opCount != 2)]];
+ const InstDB::RWInfoRm& instRmInfo = InstDB::rwInfoRm[instRwInfo.rmInfo];
+
+ out._instFlags = 0;
+ out._opCount = uint8_t(opCount);
+ out._rmFeature = instRmInfo.rmFeature;
+ out._extraReg.reset();
+ out._readFlags = rwFlags.readFlags;
+ out._writeFlags = rwFlags.writeFlags;
+
+ uint32_t nativeGpSize = gpRegSizeByArchId(archId);
+
+ constexpr uint32_t R = OpRWInfo::kRead;
+ constexpr uint32_t W = OpRWInfo::kWrite;
+ constexpr uint32_t X = OpRWInfo::kRW;
+ constexpr uint32_t RegM = OpRWInfo::kRegMem;
+ constexpr uint32_t RegPhys = OpRWInfo::kRegPhysId;
+ constexpr uint32_t MibRead = OpRWInfo::kMemBaseRead | OpRWInfo::kMemIndexRead;
+
+ if (ASMJIT_LIKELY(instRwInfo.category == InstDB::RWInfo::kCategoryGeneric)) {
+ uint32_t i;
+ uint32_t rmOpsMask = 0;
+ uint32_t rmMaxSize = 0;
+
+ for (i = 0; i < opCount; i++) {
+ OpRWInfo& op = out._operands[i];
+ const Operand_& srcOp = operands[i];
+ const InstDB::RWInfoOp& rwOpData = InstDB::rwInfoOp[instRwInfo.opInfoIndex[i]];
+
+ if (!srcOp.isRegOrMem()) {
+ op.reset();
+ continue;
+ }
+
+ op._opFlags = rwOpData.flags & ~(OpRWInfo::kZExt);
+ op._physId = rwOpData.physId;
+ op._rmSize = 0;
+ op._resetReserved();
+
+ uint64_t rByteMask = rwOpData.rByteMask;
+ uint64_t wByteMask = rwOpData.wByteMask;
+
+ if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask<uint64_t>(srcOp.size());
+ if (op.isWrite() && !wByteMask) wByteMask = Support::lsbMask<uint64_t>(srcOp.size());
+
+ op._readByteMask = rByteMask;
+ op._writeByteMask = wByteMask;
+ op._extendByteMask = 0;
+
+ if (srcOp.isReg()) {
+ // Zero extension.
+ if (op.isWrite()) {
+ if (srcOp.as<Reg>().isGp()) {
+ // GP registers on X64 are special:
+ // - 8-bit and 16-bit writes aren't zero extended.
+ // - 32-bit writes ARE zero extended.
+ rwZeroExtendGp(op, srcOp.as<Gp>(), nativeGpSize);
+ }
+ else if (rwOpData.flags & OpRWInfo::kZExt) {
+ // Otherwise follow ZExt.
+ rwZeroExtendNonVec(op, srcOp.as<Gp>());
+ }
+ }
+
+ // Aggregate values required to calculate valid Reg/M info.
+ rmMaxSize = Support::max(rmMaxSize, srcOp.size());
+ rmOpsMask |= Support::bitMask<uint32_t>(i);
+ }
+ else {
+ op.addOpFlags(MibRead);
+ }
+ }
+
+ rmOpsMask &= instRmInfo.rmOpsMask;
+ if (rmOpsMask) {
+ Support::BitWordIterator<uint32_t> it(rmOpsMask);
+ do {
+ i = it.next();
+
+ OpRWInfo& op = out._operands[i];
+ op.addOpFlags(RegM);
+
+ switch (instRmInfo.category) {
+ case InstDB::RWInfoRm::kCategoryFixed:
+ op.setRmSize(instRmInfo.fixedSize);
+ break;
+ case InstDB::RWInfoRm::kCategoryConsistent:
+ op.setRmSize(operands[i].size());
+ break;
+ case InstDB::RWInfoRm::kCategoryHalf:
+ op.setRmSize(rmMaxSize / 2u);
+ break;
+ case InstDB::RWInfoRm::kCategoryQuarter:
+ op.setRmSize(rmMaxSize / 4u);
+ break;
+ case InstDB::RWInfoRm::kCategoryEighth:
+ op.setRmSize(rmMaxSize / 8u);
+ break;
+ }
+ } while (it.hasNext());
+ }
+
+ return kErrorOk;
+ }
+
+ switch (instRwInfo.category) {
+ case InstDB::RWInfo::kCategoryMov: {
+      // Special case for the 'mov' instruction. There are several variants we
+      // have to handle as 'mov' can be used to move between GP, segment, control
+      // and debug registers. Moving between GP registers additionally allows a
+      // memory operand.
+
+ if (opCount == 2) {
+ if (operands[0].isReg() && operands[1].isReg()) {
+ const Reg& o0 = operands[0].as<Reg>();
+ const Reg& o1 = operands[1].as<Reg>();
+
+ if (o0.isGp() && o1.isGp()) {
+ out._operands[0].reset(W | RegM, operands[0].size());
+ out._operands[1].reset(R | RegM, operands[1].size());
+
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ return kErrorOk;
+ }
+
+ if (o0.isGp() && o1.isSReg()) {
+ out._operands[0].reset(W | RegM, nativeGpSize);
+ out._operands[0].setRmSize(2);
+ out._operands[1].reset(R, 2);
+ return kErrorOk;
+ }
+
+ if (o0.isSReg() && o1.isGp()) {
+ out._operands[0].reset(W, 2);
+ out._operands[1].reset(R | RegM, 2);
+ out._operands[1].setRmSize(2);
+ return kErrorOk;
+ }
+
+ if (o0.isGp() && (o1.isCReg() || o1.isDReg())) {
+ out._operands[0].reset(W, nativeGpSize);
+ out._operands[1].reset(R, nativeGpSize);
+ out._writeFlags = kOF | kSF | kZF | kAF | kPF | kCF;
+ return kErrorOk;
+ }
+
+ if ((o0.isCReg() || o0.isDReg()) && o1.isGp()) {
+ out._operands[0].reset(W, nativeGpSize);
+ out._operands[1].reset(R, nativeGpSize);
+ out._writeFlags = kOF | kSF | kZF | kAF | kPF | kCF;
+ return kErrorOk;
+ }
+ }
+
+ if (operands[0].isReg() && operands[1].isMem()) {
+ const Reg& o0 = operands[0].as<Reg>();
+ const Mem& o1 = operands[1].as<Mem>();
+
+ if (o0.isGp()) {
+ if (!o1.isOffset64Bit())
+ out._operands[0].reset(W, o0.size());
+ else
+ out._operands[0].reset(W | RegPhys, o0.size(), Gp::kIdAx);
+
+ out._operands[1].reset(R | MibRead, o0.size());
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ return kErrorOk;
+ }
+
+ if (o0.isSReg()) {
+ out._operands[0].reset(W, 2);
+ out._operands[1].reset(R, 2);
+ return kErrorOk;
+ }
+ }
+
+ if (operands[0].isMem() && operands[1].isReg()) {
+ const Mem& o0 = operands[0].as<Mem>();
+ const Reg& o1 = operands[1].as<Reg>();
+
+ if (o1.isGp()) {
+ out._operands[0].reset(W | MibRead, o1.size());
+ if (!o0.isOffset64Bit())
+ out._operands[1].reset(R, o1.size());
+ else
+ out._operands[1].reset(R | RegPhys, o1.size(), Gp::kIdAx);
+ return kErrorOk;
+ }
+
+ if (o1.isSReg()) {
+ out._operands[0].reset(W | MibRead, 2);
+ out._operands[1].reset(R, 2);
+ return kErrorOk;
+ }
+ }
+
+ if (Reg::isGp(operands[0]) && operands[1].isImm()) {
+ const Reg& o0 = operands[0].as<Reg>();
+ out._operands[0].reset(W | RegM, o0.size());
+ out._operands[1].reset();
+
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ return kErrorOk;
+ }
+
+      if (operands[0].isMem() && operands[1].isImm()) {
+        const Mem& o0 = operands[0].as<Mem>();
+        out._operands[0].reset(W | MibRead, o0.size());
+ out._operands[1].reset();
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryImul: {
+ // Special case for 'imul' instruction.
+ //
+ // There are 3 variants in general:
+ //
+ // 1. Standard multiplication: 'A = A * B'.
+ // 2. Multiplication with imm: 'A = B * C'.
+ // 3. Extended multiplication: 'A:B = B * C'.
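+      //
+      // Mapped to operand lists below (illustrative):
+      //   1. imul(gp, gp/mem)      -> 2 ops, destination is read+write (X).
+      //   2. imul(gp, gp/mem, imm) -> 3 ops, destination is write-only (W).
+      //   3. imul(dx, ax, gp/mem)  -> 3 ops, DX:AX-style widening form.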
+
+ if (opCount == 2) {
+ if (operands[0].isReg() && operands[1].isImm()) {
+ out._operands[0].reset(X, operands[0].size());
+ out._operands[1].reset();
+
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ return kErrorOk;
+ }
+
+ if (Reg::isGpw(operands[0]) && operands[1].size() == 1) {
+ // imul ax, r8/m8 <- AX = AL * r8/m8
+ out._operands[0].reset(X | RegPhys, 2, Gp::kIdAx);
+ out._operands[0].setReadByteMask(Support::lsbMask<uint64_t>(1));
+ out._operands[1].reset(R | RegM, 1);
+ }
+ else {
+ // imul r?, r?/m?
+ out._operands[0].reset(X, operands[0].size());
+ out._operands[1].reset(R | RegM, operands[0].size());
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ }
+
+ if (operands[1].isMem())
+ out._operands[1].addOpFlags(MibRead);
+ return kErrorOk;
+ }
+
+ if (opCount == 3) {
+ if (operands[2].isImm()) {
+ out._operands[0].reset(W, operands[0].size());
+ out._operands[1].reset(R | RegM, operands[1].size());
+ out._operands[2].reset();
+
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ if (operands[1].isMem())
+ out._operands[1].addOpFlags(MibRead);
+ return kErrorOk;
+ }
+ else {
+ out._operands[0].reset(W | RegPhys, operands[0].size(), Gp::kIdDx);
+ out._operands[1].reset(X | RegPhys, operands[1].size(), Gp::kIdAx);
+ out._operands[2].reset(R | RegM, operands[2].size());
+
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+ rwZeroExtendGp(out._operands[1], operands[1].as<Gp>(), nativeGpSize);
+ if (operands[2].isMem())
+ out._operands[2].addOpFlags(MibRead);
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryMovh64: {
+      // Special case for 'movhpd|movhps' instructions. Note that this is only
+      // required for legacy (non-AVX) variants as AVX instructions use either
+      // 2 or 3 operands and are handled by `kCategoryGeneric`.
+ if (opCount == 2) {
+ if (BaseReg::isVec(operands[0]) && operands[1].isMem()) {
+ out._operands[0].reset(W, 8);
+ out._operands[0].setWriteByteMask(Support::lsbMask<uint64_t>(8) << 8);
+ out._operands[1].reset(R | MibRead, 8);
+ return kErrorOk;
+ }
+
+ if (operands[0].isMem() && BaseReg::isVec(operands[1])) {
+ out._operands[0].reset(W | MibRead, 8);
+ out._operands[1].reset(R, 8);
+ out._operands[1].setReadByteMask(Support::lsbMask<uint64_t>(8) << 8);
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryVmaskmov: {
+ // Special case for 'vmaskmovpd|vmaskmovps|vpmaskmovd|vpmaskmovq' instructions.
+ if (opCount == 3) {
+ if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1]) && operands[2].isMem()) {
+ out._operands[0].reset(W, operands[0].size());
+ out._operands[1].reset(R, operands[1].size());
+ out._operands[2].reset(R | MibRead, operands[1].size());
+
+ rwZeroExtendAvxVec(out._operands[0], operands[0].as<Vec>());
+ return kErrorOk;
+ }
+
+ if (operands[0].isMem() && BaseReg::isVec(operands[1]) && BaseReg::isVec(operands[2])) {
+ out._operands[0].reset(X | MibRead, operands[1].size());
+ out._operands[1].reset(R, operands[1].size());
+ out._operands[2].reset(R, operands[2].size());
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryVmovddup: {
+      // Special case for the 'vmovddup' instruction. This instruction has
+      // interesting semantics as the 128-bit XMM version uses only a 64-bit
+      // memory operand (m64), whereas the 256/512-bit versions use 256/512-bit
+      // memory operands, respectively.
+ if (opCount == 2) {
+ if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1])) {
+ uint32_t o0Size = operands[0].size();
+ uint32_t o1Size = o0Size == 16 ? 8 : o0Size;
+
+ out._operands[0].reset(W, o0Size);
+ out._operands[1].reset(R | RegM, o1Size);
+ out._operands[1]._readByteMask &= 0x00FF00FF00FF00FFu;
+
+ rwZeroExtendAvxVec(out._operands[0], operands[0].as<Vec>());
+ return kErrorOk;
+ }
+
+ if (BaseReg::isVec(operands[0]) && operands[1].isMem()) {
+ uint32_t o0Size = operands[0].size();
+ uint32_t o1Size = o0Size == 16 ? 8 : o0Size;
+
+ out._operands[0].reset(W, o0Size);
+ out._operands[1].reset(R | MibRead, o1Size);
+
+ rwZeroExtendAvxVec(out._operands[0], operands[0].as<Vec>());
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryVmovmskpd:
+ case InstDB::RWInfo::kCategoryVmovmskps: {
+ // Special case for 'vmovmskpd|vmovmskps' instructions.
+ if (opCount == 2) {
+ if (BaseReg::isGp(operands[0]) && BaseReg::isVec(operands[1])) {
+ out._operands[0].reset(W, 1);
+ out._operands[0].setExtendByteMask(Support::lsbMask<uint32_t>(nativeGpSize - 1) << 1);
+ out._operands[1].reset(R, operands[1].size());
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryVmov1_2:
+ case InstDB::RWInfo::kCategoryVmov1_4:
+ case InstDB::RWInfo::kCategoryVmov1_8: {
+ // Special case for instructions where the destination is 1:N (narrowing).
+ //
+ // Vmov1_2:
+ // vcvtpd2dq|vcvttpd2dq
+ // vcvtpd2udq|vcvttpd2udq
+ // vcvtpd2ps|vcvtps2ph
+ // vcvtqq2ps|vcvtuqq2ps
+ // vpmovwb|vpmovswb|vpmovuswb
+ // vpmovdw|vpmovsdw|vpmovusdw
+ // vpmovqd|vpmovsqd|vpmovusqd
+ //
+ // Vmov1_4:
+ // vpmovdb|vpmovsdb|vpmovusdb
+ // vpmovqw|vpmovsqw|vpmovusqw
+ //
+ // Vmov1_8:
+ // pmovmskb|vpmovmskb
+ // vpmovqb|vpmovsqb|vpmovusqb
+ uint32_t shift = instRwInfo.category - InstDB::RWInfo::kCategoryVmov1_2 + 1;
+
+ if (opCount >= 2) {
+ if (opCount >= 3) {
+ if (opCount > 3)
+ return DebugUtils::errored(kErrorInvalidInstruction);
+ out._operands[2].reset();
+ }
+
+ if (operands[0].isReg() && operands[1].isReg()) {
+ uint32_t size1 = operands[1].size();
+ uint32_t size0 = size1 >> shift;
+
+ out._operands[0].reset(W, size0);
+ out._operands[1].reset(R, size1);
+
+ if (instRmInfo.rmOpsMask & 0x1) {
+ out._operands[0].addOpFlags(RegM);
+ out._operands[0].setRmSize(size0);
+ }
+
+ if (instRmInfo.rmOpsMask & 0x2) {
+ out._operands[1].addOpFlags(RegM);
+ out._operands[1].setRmSize(size1);
+ }
+
+ // Handle 'pmovmskb|vpmovmskb'.
+ if (BaseReg::isGp(operands[0]))
+ rwZeroExtendGp(out._operands[0], operands[0].as<Gp>(), nativeGpSize);
+
+ if (BaseReg::isVec(operands[0]))
+ rwZeroExtendAvxVec(out._operands[0], operands[0].as<Vec>());
+
+ return kErrorOk;
+ }
+
+ if (operands[0].isReg() && operands[1].isMem()) {
+ uint32_t size1 = operands[1].size() ? operands[1].size() : uint32_t(16);
+ uint32_t size0 = size1 >> shift;
+
+ out._operands[0].reset(W, size0);
+ out._operands[1].reset(R | MibRead, size1);
+ return kErrorOk;
+ }
+
+ if (operands[0].isMem() && operands[1].isReg()) {
+ uint32_t size1 = operands[1].size();
+ uint32_t size0 = size1 >> shift;
+
+ out._operands[0].reset(W | MibRead, size0);
+ out._operands[1].reset(R, size1);
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+
+ case InstDB::RWInfo::kCategoryVmov2_1:
+ case InstDB::RWInfo::kCategoryVmov4_1:
+ case InstDB::RWInfo::kCategoryVmov8_1: {
+ // Special case for instructions where the destination is N:1 (widening).
+ //
+ // Vmov2_1:
+ // vcvtdq2pd|vcvtudq2pd
+ // vcvtps2pd|vcvtph2ps
+ // vcvtps2qq|vcvtps2uqq
+ // vcvttps2qq|vcvttps2uqq
+ // vpmovsxbw|vpmovzxbw
+ // vpmovsxwd|vpmovzxwd
+ // vpmovsxdq|vpmovzxdq
+ //
+ // Vmov4_1:
+ // vpmovsxbd|vpmovzxbd
+ // vpmovsxwq|vpmovzxwq
+ //
+ // Vmov8_1:
+ // vpmovsxbq|vpmovzxbq
+ uint32_t shift = instRwInfo.category - InstDB::RWInfo::kCategoryVmov2_1 + 1;
+
+ if (opCount >= 2) {
+ if (opCount >= 3) {
+ if (opCount > 3)
+ return DebugUtils::errored(kErrorInvalidInstruction);
+ out._operands[2].reset();
+ }
+
+ uint32_t size0 = operands[0].size();
+ uint32_t size1 = size0 >> shift;
+
+ out._operands[0].reset(W, size0);
+ out._operands[1].reset(R, size1);
+
+ if (operands[0].isReg() && operands[1].isReg()) {
+ if (instRmInfo.rmOpsMask & 0x1) {
+ out._operands[0].addOpFlags(RegM);
+ out._operands[0].setRmSize(size0);
+ }
+
+ if (instRmInfo.rmOpsMask & 0x2) {
+ out._operands[1].addOpFlags(RegM);
+ out._operands[1].setRmSize(size1);
+ }
+ return kErrorOk;
+ }
+
+ if (operands[0].isReg() && operands[1].isMem()) {
+ out._operands[1].addOpFlags(MibRead);
+ return kErrorOk;
+ }
+ }
+ break;
+ }
+ }
+
+ return DebugUtils::errored(kErrorInvalidInstruction);
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+// ============================================================================
+// [asmjit::x86::InstInternal - QueryFeatures]
+// ============================================================================
+
+#ifndef ASMJIT_NO_INTROSPECTION
+struct RegAnalysis {
+ uint32_t regTypeMask;
+ uint32_t highVecUsed;
+
+ inline bool hasRegType(uint32_t regType) const noexcept {
+ return Support::bitTest(regTypeMask, regType);
+ }
+};
+
+static RegAnalysis InstInternal_regAnalysis(const Operand_* operands, uint32_t opCount) noexcept {
+ uint32_t mask = 0;
+ uint32_t highVecUsed = 0;
+
+ for (uint32_t i = 0; i < opCount; i++) {
+ const Operand_& op = operands[i];
+ if (op.isReg()) {
+ const BaseReg& reg = op.as<BaseReg>();
+ mask |= Support::bitMask(reg.type());
+ if (reg.isVec())
+ highVecUsed |= uint32_t(reg.id() >= 16 && reg.id() < 32);
+ }
+ else if (op.isMem()) {
+ const BaseMem& mem = op.as<BaseMem>();
+ if (mem.hasBaseReg()) mask |= Support::bitMask(mem.baseType());
+ if (mem.hasIndexReg()) {
+ mask |= Support::bitMask(mem.indexType());
+ highVecUsed |= uint32_t(mem.indexId() >= 16 && mem.indexId() < 32);
+ }
+ }
+ }
+
+ return RegAnalysis { mask, highVecUsed };
+}
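+
+// Note: vector register ids 16..31 (xmm16..zmm31) are only encodable with an
+// EVEX prefix, which is why a non-zero `highVecUsed` later forces the AVX-512
+// feature set in `queryFeatures()`.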
+
+Error InstInternal::queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept {
+ // Only called when `archId` matches X86 family.
+ DebugUtils::unused(archId);
+ ASMJIT_ASSERT(ArchInfo::isX86Family(archId));
+
+ // Get the instruction data.
+ uint32_t instId = inst.id();
+ uint32_t options = inst.options();
+
+ if (ASMJIT_UNLIKELY(!Inst::isDefinedId(instId)))
+ return DebugUtils::errored(kErrorInvalidInstruction);
+
+ const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
+ const InstDB::CommonInfoTableB& tableB = InstDB::_commonInfoTableB[instInfo._commonInfoIndexB];
+
+ const uint8_t* fData = tableB.featuresBegin();
+ const uint8_t* fEnd = tableB.featuresEnd();
+
+ // Copy all features to `out`.
+ out.reset();
+ do {
+ uint32_t feature = fData[0];
+ if (!feature)
+ break;
+ out.add(feature);
+ } while (++fData != fEnd);
+
+ // Since AsmJit aggregates instructions that share the same name we have to
+ // deal with some special cases and also with MMX/SSE and AVX/AVX2 overlaps.
+ if (fData != tableB.featuresBegin()) {
+ RegAnalysis regAnalysis = InstInternal_regAnalysis(operands, opCount);
+
+ // Handle MMX vs SSE overlap.
+ if (out.has(Features::kMMX) || out.has(Features::kMMX2)) {
+ // Only instructions defined by SSE and SSE2 overlap. Instructions
+ // introduced by newer instruction sets like SSE3+ don't state MMX as
+ // they require SSE3+.
+ if (out.has(Features::kSSE) || out.has(Features::kSSE2)) {
+ if (!regAnalysis.hasRegType(Reg::kTypeXmm)) {
+ // The instruction doesn't use XMM register(s), thus it's MMX/MMX2 only.
+ out.remove(Features::kSSE);
+ out.remove(Features::kSSE2);
+ }
+ else {
+ out.remove(Features::kMMX);
+ out.remove(Features::kMMX2);
+ }
+
+ // Special case: PEXTRW instruction is MMX/SSE2 instruction. However,
+ // MMX/SSE version cannot access memory (only register to register
+ // extract) so when SSE4.1 introduced the whole family of PEXTR/PINSR
+ // instructions they also introduced PEXTRW with a new opcode 0x15 that
+ // can extract directly to memory. This instruction is, of course, not
+ // compatible with MMX/SSE2 and would #UD if SSE4.1 is not supported.
+ if (instId == Inst::kIdPextrw) {
+ ASMJIT_ASSERT(out.has(Features::kSSE2));
+ ASMJIT_ASSERT(out.has(Features::kSSE4_1));
+
+ if (opCount >= 1 && operands[0].isMem())
+ out.remove(Features::kSSE2);
+ else
+ out.remove(Features::kSSE4_1);
+ }
+ }
+ }
+
+ // Handle PCLMULQDQ vs VPCLMULQDQ.
+ if (out.has(Features::kVPCLMULQDQ)) {
+ if (regAnalysis.hasRegType(Reg::kTypeZmm) || Support::bitTest(options, Inst::kOptionEvex)) {
+ // AVX512_F & VPCLMULQDQ.
+ out.remove(Features::kAVX, Features::kPCLMULQDQ);
+ }
+ else if (regAnalysis.hasRegType(Reg::kTypeYmm)) {
+ out.remove(Features::kAVX512_F, Features::kAVX512_VL);
+ }
+ else {
+ // AVX & PCLMULQDQ.
+ out.remove(Features::kAVX512_F, Features::kAVX512_VL, Features::kVPCLMULQDQ);
+ }
+ }
+
+ // Handle AVX vs AVX2 overlap.
+ if (out.has(Features::kAVX) && out.has(Features::kAVX2)) {
+ bool isAVX2 = true;
+      // Special case: VBROADCASTSS and VBROADCASTSD were introduced in AVX, but
+      // only the version that uses memory as a source operand. AVX2 then added
+      // support for a register source operand.
+ if (instId == Inst::kIdVbroadcastss || instId == Inst::kIdVbroadcastsd) {
+ if (opCount > 1 && operands[1].isMem())
+ isAVX2 = false;
+ }
+ else {
+        // The AVX instruction set doesn't support integer operations on YMM
+        // registers as these were introduced later by AVX2. In our case we have
+        // to check whether YMM register(s) are in use; if they are, this is an
+        // AVX2 instruction.
+ if (!(regAnalysis.regTypeMask & Support::bitMask(Reg::kTypeYmm, Reg::kTypeZmm)))
+ isAVX2 = false;
+ }
+
+ if (isAVX2)
+ out.remove(Features::kAVX);
+ else
+ out.remove(Features::kAVX2);
+ }
+
+ // Handle AVX|AVX2|FMA|F16C vs AVX512 overlap.
+ if (out.has(Features::kAVX) || out.has(Features::kAVX2) || out.has(Features::kFMA) || out.has(Features::kF16C)) {
+      // Only AVX512-F|BW|DQ allow encoding AVX/AVX2/FMA/F16C instructions.
+ if (out.has(Features::kAVX512_F) || out.has(Features::kAVX512_BW) || out.has(Features::kAVX512_DQ)) {
+ uint32_t hasEvex = options & (Inst::kOptionEvex | Inst::_kOptionAvx512Mask);
+ uint32_t hasKMask = inst.extraReg().type() == Reg::kTypeKReg;
+ uint32_t hasKOrZmm = regAnalysis.regTypeMask & Support::bitMask(Reg::kTypeZmm, Reg::kTypeKReg);
+
+ uint32_t mustUseEvex = 0;
+
+ switch (instId) {
+          // Special case: VPSLLDQ and VPSRLDQ instructions only allow the
+          // `reg, reg, imm` combination in AVX|AVX2 mode; AVX-512 then introduced
+          // the `reg, reg/mem, imm` combination that uses the EVEX prefix. This
+          // means that if the second operand is memory then this is an AVX-512_BW
+          // instruction and not an AVX/AVX2 instruction.
+ case Inst::kIdVpslldq:
+ case Inst::kIdVpsrldq:
+ mustUseEvex = opCount >= 2 && operands[1].isMem();
+ break;
+
+ // Special case: VPBROADCAST[B|D|Q|W] only supports r32/r64 with EVEX prefix.
+ case Inst::kIdVpbroadcastb:
+ case Inst::kIdVpbroadcastd:
+ case Inst::kIdVpbroadcastq:
+ case Inst::kIdVpbroadcastw:
+ mustUseEvex = opCount >= 2 && x86::Reg::isGp(operands[1]);
+ break;
+
+          // Special case: in AVX2 mode VPERMPD only supports an immediate
+          // selector; the register-selector (non-immediate) form requires
+          // AVX512-F and newer, thus EVEX.
+ case Inst::kIdVpermpd:
+ mustUseEvex = opCount >= 3 && !operands[2].isImm();
+ break;
+ }
+
+ if (!(hasEvex | mustUseEvex | hasKMask | hasKOrZmm | regAnalysis.highVecUsed))
+ out.remove(Features::kAVX512_F, Features::kAVX512_BW, Features::kAVX512_DQ, Features::kAVX512_VL);
+ else
+ out.remove(Features::kAVX, Features::kAVX2, Features::kFMA, Features::kF16C);
+ }
+ }
+
+ // Clear AVX512_VL if ZMM register is used.
+ if (regAnalysis.hasRegType(Reg::kTypeZmm))
+ out.remove(Features::kAVX512_VL);
+ }
+
+ return kErrorOk;
+}
+#endif // !ASMJIT_NO_INTROSPECTION
+
+// ============================================================================
+// [asmjit::x86::InstInternal - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(x86_inst_api_text) {
+ // All known instructions should be matched.
+ INFO("Matching all X86 instructions");
+ for (uint32_t a = 1; a < Inst::_kIdCount; a++) {
+ StringTmp<128> aName;
+ EXPECT(InstInternal::instIdToString(0, a, aName) == kErrorOk,
+ "Failed to get the name of instruction #%u", a);
+
+ uint32_t b = InstInternal::stringToInstId(0, aName.data(), aName.size());
+ StringTmp<128> bName;
+ InstInternal::instIdToString(0, b, bName);
+
+ EXPECT(a == b,
+ "Instructions do not match \"%s\" (#%u) != \"%s\" (#%u)", aName.data(), a, bName.data(), b);
+ }
+}
+#endif
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h b/3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h
new file mode 100644
index 00000000000..0389cf562a9
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86instapi_p.h
@@ -0,0 +1,59 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86INSTAPI_P_H_INCLUDED
+#define ASMJIT_X86_X86INSTAPI_P_H_INCLUDED
+
+#include "../core/inst.h"
+#include "../core/operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_x86
+//! \{
+
+namespace InstInternal {
+
+#ifndef ASMJIT_NO_TEXT
+Error instIdToString(uint32_t archId, uint32_t instId, String& output) noexcept;
+uint32_t stringToInstId(uint32_t archId, const char* s, size_t len) noexcept;
+#endif // !ASMJIT_NO_TEXT
+
+#ifndef ASMJIT_NO_VALIDATION
+Error validate(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept;
+#endif // !ASMJIT_NO_VALIDATION
+
+#ifndef ASMJIT_NO_INTROSPECTION
+Error queryRWInfo(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, InstRWInfo& out) noexcept;
+Error queryFeatures(uint32_t archId, const BaseInst& inst, const Operand_* operands, uint32_t opCount, BaseFeatures& out) noexcept;
+#endif // !ASMJIT_NO_INTROSPECTION
+
+} // {InstInternal}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86INSTAPI_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp b/3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp
new file mode 100644
index 00000000000..2d4855848f8
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86instdb.cpp
@@ -0,0 +1,3983 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+// ----------------------------------------------------------------------------
+// IMPORTANT: AsmJit now uses an external instruction database to populate
+// static tables within this file. Perform the following steps to regenerate
+// all tables enclosed by ${...}:
+//
+// 1. Install node.js environment <https://nodejs.org>
+// 2. Go to asmjit/tools directory
+// 3. Get the latest asmdb from <https://github.com/asmjit/asmdb> and
+// copy/link the `asmdb` directory to `asmjit/tools/asmdb`.
+// 4. Execute `node tablegen-x86.js`
+//
+// Instruction encoding and opcodes were added to the `x86inst.cpp` database
+// manually in the past and they are not updated by the script as it became
+// tricky. However, everything else is updated including instruction operands
+// and tables required to validate them, instruction read/write information
+// (including registers and flags), and all indexes to all tables.
+// ----------------------------------------------------------------------------
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/cpuinfo.h"
+#include "../core/misc_p.h"
+#include "../core/support.h"
+#include "../x86/x86features.h"
+#include "../x86/x86instdb_p.h"
+#include "../x86/x86opcode_p.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::InstDB - InstInfo]
+// ============================================================================
+
+// Instruction opcode definitions:
+// - `O` encodes X86|MMX|SSE instructions.
+// - `V` encodes VEX|XOP|EVEX instructions.
+// - `E` encodes EVEX instructions only.
+#define O_ENCODE(VEX, PREFIX, OPCODE, O, L, W, EvexW, N, TT) \
+ ((PREFIX) | (OPCODE) | (O) | (L) | (W) | (EvexW) | (N) | (TT) | \
+ (VEX && ((PREFIX) & Opcode::kMM_Mask) != Opcode::kMM_0F ? int(Opcode::kMM_ForceVex3) : 0))
+
+#define O(PREFIX, OPCODE, O, LL, W, EvexW, N, TT) (O_ENCODE(0, Opcode::k##PREFIX, 0x##OPCODE, Opcode::kO_##O, Opcode::kLL_##LL, Opcode::kW_##W, Opcode::kEvex_W_##EvexW, Opcode::kCDSHL_##N, Opcode::kCDTT_##TT))
+#define V(PREFIX, OPCODE, O, LL, W, EvexW, N, TT) (O_ENCODE(1, Opcode::k##PREFIX, 0x##OPCODE, Opcode::kO_##O, Opcode::kLL_##LL, Opcode::kW_##W, Opcode::kEvex_W_##EvexW, Opcode::kCDSHL_##N, Opcode::kCDTT_##TT))
+#define E(PREFIX, OPCODE, O, LL, W, EvexW, N, TT) (O_ENCODE(1, Opcode::k##PREFIX, 0x##OPCODE, Opcode::kO_##O, Opcode::kLL_##LL, Opcode::kW_##W, Opcode::kEvex_W_##EvexW, Opcode::kCDSHL_##N, Opcode::kCDTT_##TT) | Opcode::kMM_ForceEvex)
+#define O_FPU(PREFIX, OPCODE, O) (Opcode::kFPU_##PREFIX | (0x##OPCODE & 0xFFu) | ((0x##OPCODE >> 8) << Opcode::kFPU_2B_Shift) | Opcode::kO_##O)
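+
+// For example, `O(660F00,58,_,_,_,_,_,_)` (used by ADDPD below) encodes the
+// legacy `66 0F 58` opcode: 66 mandatory prefix, 0F escape, opcode byte 0x58,
+// and no opcode extension, LL/W, or EVEX-specific fields.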
+
+// Don't store `_nameDataIndex` if instruction names are disabled. Since some
+// APIs can still read `_nameDataIndex`, it's much safer to keep it zero when
+// names are not compiled in.
+#ifndef ASMJIT_NO_TEXT
+ #define NAME_DATA_INDEX(X) X
+#else
+ #define NAME_DATA_INDEX(X) 0
+#endif
+
+// Defines an X86 instruction.
+#define INST(id, encoding, opcode0, opcode1, mainOpcodeIndex, altOpcodeIndex, nameDataIndex, commonInfoIndexA, commonInfoIndexB) { \
+  uint32_t(NAME_DATA_INDEX(nameDataIndex)), \
+  uint32_t(commonInfoIndexA), \
+  uint32_t(commonInfoIndexB), \
+ uint8_t(InstDB::kEncoding##encoding), \
+ uint8_t((opcode0) & 0xFFu), \
+ uint8_t(mainOpcodeIndex), \
+ uint8_t(altOpcodeIndex) \
+}
+
+const InstDB::InstInfo InstDB::_instInfoTable[] = {
+ /*--------------------+--------------------+------------------+--------+------------------+--------+----+----+------+----+----+
+ | Instruction | Instruction | Main Opcode | EVEX |Alternative Opcode| EVEX |Op0X|Op1X|Name-X|IdxA|IdxB|
+ | Id & Name | Encoding | (pp+mmm|op/o|L|w|W|N|TT.)|--(pp+mmm|op/o|L|w|W|N|TT.)| (auto-generated) |
+ +---------------------+--------------------+---------+----+-+-+-+-+----+---------+----+-+-+-+-+----+----+----+------+----+---*/
+ // ${InstInfo:Begin}
+ INST(None , None , 0 , 0 , 0 , 0 , 0 , 0 , 0 ), // #0
+ INST(Aaa , X86Op_xAX , O(000000,37,_,_,_,_,_,_ ), 0 , 0 , 0 , 1 , 1 , 1 ), // #1
+ INST(Aad , X86I_xAX , O(000000,D5,_,_,_,_,_,_ ), 0 , 0 , 0 , 5 , 2 , 1 ), // #2
+ INST(Aam , X86I_xAX , O(000000,D4,_,_,_,_,_,_ ), 0 , 0 , 0 , 9 , 2 , 1 ), // #3
+ INST(Aas , X86Op_xAX , O(000000,3F,_,_,_,_,_,_ ), 0 , 0 , 0 , 13 , 1 , 1 ), // #4
+ INST(Adc , X86Arith , O(000000,10,2,_,x,_,_,_ ), 0 , 1 , 0 , 17 , 3 , 2 ), // #5
+ INST(Adcx , X86Rm , O(660F38,F6,_,_,x,_,_,_ ), 0 , 2 , 0 , 21 , 4 , 3 ), // #6
+ INST(Add , X86Arith , O(000000,00,0,_,x,_,_,_ ), 0 , 0 , 0 , 761 , 3 , 1 ), // #7
+ INST(Addpd , ExtRm , O(660F00,58,_,_,_,_,_,_ ), 0 , 3 , 0 , 4814 , 5 , 4 ), // #8
+ INST(Addps , ExtRm , O(000F00,58,_,_,_,_,_,_ ), 0 , 4 , 0 , 4826 , 5 , 5 ), // #9
+ INST(Addsd , ExtRm , O(F20F00,58,_,_,_,_,_,_ ), 0 , 5 , 0 , 5048 , 6 , 4 ), // #10
+ INST(Addss , ExtRm , O(F30F00,58,_,_,_,_,_,_ ), 0 , 6 , 0 , 2955 , 7 , 5 ), // #11
+ INST(Addsubpd , ExtRm , O(660F00,D0,_,_,_,_,_,_ ), 0 , 3 , 0 , 4553 , 5 , 6 ), // #12
+ INST(Addsubps , ExtRm , O(F20F00,D0,_,_,_,_,_,_ ), 0 , 5 , 0 , 4565 , 5 , 6 ), // #13
+ INST(Adox , X86Rm , O(F30F38,F6,_,_,x,_,_,_ ), 0 , 7 , 0 , 26 , 4 , 7 ), // #14
+ INST(Aesdec , ExtRm , O(660F38,DE,_,_,_,_,_,_ ), 0 , 2 , 0 , 3010 , 5 , 8 ), // #15
+ INST(Aesdeclast , ExtRm , O(660F38,DF,_,_,_,_,_,_ ), 0 , 2 , 0 , 3018 , 5 , 8 ), // #16
+ INST(Aesenc , ExtRm , O(660F38,DC,_,_,_,_,_,_ ), 0 , 2 , 0 , 3030 , 5 , 8 ), // #17
+ INST(Aesenclast , ExtRm , O(660F38,DD,_,_,_,_,_,_ ), 0 , 2 , 0 , 3038 , 5 , 8 ), // #18
+ INST(Aesimc , ExtRm , O(660F38,DB,_,_,_,_,_,_ ), 0 , 2 , 0 , 3050 , 5 , 8 ), // #19
+ INST(Aeskeygenassist , ExtRmi , O(660F3A,DF,_,_,_,_,_,_ ), 0 , 8 , 0 , 3058 , 8 , 8 ), // #20
+ INST(And , X86Arith , O(000000,20,4,_,x,_,_,_ ), 0 , 9 , 0 , 2433 , 9 , 1 ), // #21
+ INST(Andn , VexRvm_Wx , V(000F38,F2,_,0,x,_,_,_ ), 0 , 10 , 0 , 6494 , 10 , 9 ), // #22
+ INST(Andnpd , ExtRm , O(660F00,55,_,_,_,_,_,_ ), 0 , 3 , 0 , 3091 , 5 , 4 ), // #23
+ INST(Andnps , ExtRm , O(000F00,55,_,_,_,_,_,_ ), 0 , 4 , 0 , 3099 , 5 , 5 ), // #24
+ INST(Andpd , ExtRm , O(660F00,54,_,_,_,_,_,_ ), 0 , 3 , 0 , 4067 , 11 , 4 ), // #25
+ INST(Andps , ExtRm , O(000F00,54,_,_,_,_,_,_ ), 0 , 4 , 0 , 4077 , 11 , 5 ), // #26
+ INST(Arpl , X86Mr_NoSize , O(000000,63,_,_,_,_,_,_ ), 0 , 0 , 0 , 31 , 12 , 10 ), // #27
+ INST(Bextr , VexRmv_Wx , V(000F38,F7,_,0,x,_,_,_ ), 0 , 10 , 0 , 36 , 13 , 9 ), // #28
+ INST(Blcfill , VexVm_Wx , V(XOP_M9,01,1,0,x,_,_,_ ), 0 , 11 , 0 , 42 , 14 , 11 ), // #29
+ INST(Blci , VexVm_Wx , V(XOP_M9,02,6,0,x,_,_,_ ), 0 , 12 , 0 , 50 , 14 , 11 ), // #30
+ INST(Blcic , VexVm_Wx , V(XOP_M9,01,5,0,x,_,_,_ ), 0 , 13 , 0 , 55 , 14 , 11 ), // #31
+ INST(Blcmsk , VexVm_Wx , V(XOP_M9,02,1,0,x,_,_,_ ), 0 , 11 , 0 , 61 , 14 , 11 ), // #32
+ INST(Blcs , VexVm_Wx , V(XOP_M9,01,3,0,x,_,_,_ ), 0 , 14 , 0 , 68 , 14 , 11 ), // #33
+ INST(Blendpd , ExtRmi , O(660F3A,0D,_,_,_,_,_,_ ), 0 , 8 , 0 , 3177 , 8 , 12 ), // #34
+ INST(Blendps , ExtRmi , O(660F3A,0C,_,_,_,_,_,_ ), 0 , 8 , 0 , 3186 , 8 , 12 ), // #35
+ INST(Blendvpd , ExtRm_XMM0 , O(660F38,15,_,_,_,_,_,_ ), 0 , 2 , 0 , 3195 , 15 , 12 ), // #36
+ INST(Blendvps , ExtRm_XMM0 , O(660F38,14,_,_,_,_,_,_ ), 0 , 2 , 0 , 3205 , 15 , 12 ), // #37
+ INST(Blsfill , VexVm_Wx , V(XOP_M9,01,2,0,x,_,_,_ ), 0 , 15 , 0 , 73 , 14 , 11 ), // #38
+ INST(Blsi , VexVm_Wx , V(000F38,F3,3,0,x,_,_,_ ), 0 , 16 , 0 , 81 , 14 , 9 ), // #39
+ INST(Blsic , VexVm_Wx , V(XOP_M9,01,6,0,x,_,_,_ ), 0 , 12 , 0 , 86 , 14 , 11 ), // #40
+ INST(Blsmsk , VexVm_Wx , V(000F38,F3,2,0,x,_,_,_ ), 0 , 17 , 0 , 92 , 14 , 9 ), // #41
+ INST(Blsr , VexVm_Wx , V(000F38,F3,1,0,x,_,_,_ ), 0 , 18 , 0 , 99 , 14 , 9 ), // #42
+ INST(Bndcl , X86Rm , O(F30F00,1A,_,_,_,_,_,_ ), 0 , 6 , 0 , 104 , 16 , 13 ), // #43
+ INST(Bndcn , X86Rm , O(F20F00,1B,_,_,_,_,_,_ ), 0 , 5 , 0 , 110 , 16 , 13 ), // #44
+ INST(Bndcu , X86Rm , O(F20F00,1A,_,_,_,_,_,_ ), 0 , 5 , 0 , 116 , 16 , 13 ), // #45
+ INST(Bndldx , X86Rm , O(000F00,1A,_,_,_,_,_,_ ), 0 , 4 , 0 , 122 , 17 , 13 ), // #46
+ INST(Bndmk , X86Rm , O(F30F00,1B,_,_,_,_,_,_ ), 0 , 6 , 0 , 129 , 18 , 13 ), // #47
+ INST(Bndmov , X86Bndmov , O(660F00,1A,_,_,_,_,_,_ ), O(660F00,1B,_,_,_,_,_,_ ), 3 , 1 , 135 , 19 , 13 ), // #48
+ INST(Bndstx , X86Mr , O(000F00,1B,_,_,_,_,_,_ ), 0 , 4 , 0 , 142 , 20 , 13 ), // #49
+ INST(Bound , X86Rm , O(000000,62,_,_,_,_,_,_ ), 0 , 0 , 0 , 149 , 21 , 0 ), // #50
+ INST(Bsf , X86Rm , O(000F00,BC,_,_,x,_,_,_ ), 0 , 4 , 0 , 155 , 22 , 1 ), // #51
+ INST(Bsr , X86Rm , O(000F00,BD,_,_,x,_,_,_ ), 0 , 4 , 0 , 159 , 22 , 1 ), // #52
+ INST(Bswap , X86Bswap , O(000F00,C8,_,_,x,_,_,_ ), 0 , 4 , 0 , 163 , 23 , 0 ), // #53
+ INST(Bt , X86Bt , O(000F00,A3,_,_,x,_,_,_ ), O(000F00,BA,4,_,x,_,_,_ ), 4 , 2 , 169 , 24 , 14 ), // #54
+ INST(Btc , X86Bt , O(000F00,BB,_,_,x,_,_,_ ), O(000F00,BA,7,_,x,_,_,_ ), 4 , 3 , 172 , 25 , 14 ), // #55
+ INST(Btr , X86Bt , O(000F00,B3,_,_,x,_,_,_ ), O(000F00,BA,6,_,x,_,_,_ ), 4 , 4 , 176 , 25 , 14 ), // #56
+ INST(Bts , X86Bt , O(000F00,AB,_,_,x,_,_,_ ), O(000F00,BA,5,_,x,_,_,_ ), 4 , 5 , 180 , 25 , 14 ), // #57
+ INST(Bzhi , VexRmv_Wx , V(000F38,F5,_,0,x,_,_,_ ), 0 , 10 , 0 , 184 , 13 , 15 ), // #58
+ INST(Call , X86Call , O(000000,FF,2,_,_,_,_,_ ), 0 , 1 , 0 , 2848 , 26 , 1 ), // #59
+ INST(Cbw , X86Op_xAX , O(660000,98,_,_,_,_,_,_ ), 0 , 19 , 0 , 189 , 27 , 0 ), // #60
+ INST(Cdq , X86Op_xDX_xAX , O(000000,99,_,_,_,_,_,_ ), 0 , 0 , 0 , 193 , 28 , 0 ), // #61
+ INST(Cdqe , X86Op_xAX , O(000000,98,_,_,1,_,_,_ ), 0 , 20 , 0 , 197 , 29 , 0 ), // #62
+ INST(Clac , X86Op , O(000F01,CA,_,_,_,_,_,_ ), 0 , 21 , 0 , 202 , 30 , 16 ), // #63
+ INST(Clc , X86Op , O(000000,F8,_,_,_,_,_,_ ), 0 , 0 , 0 , 207 , 30 , 17 ), // #64
+ INST(Cld , X86Op , O(000000,FC,_,_,_,_,_,_ ), 0 , 0 , 0 , 211 , 30 , 18 ), // #65
+ INST(Cldemote , X86M_Only , O(000F00,1C,0,_,_,_,_,_ ), 0 , 4 , 0 , 215 , 31 , 19 ), // #66
+ INST(Clflush , X86M_Only , O(000F00,AE,7,_,_,_,_,_ ), 0 , 22 , 0 , 224 , 31 , 20 ), // #67
+ INST(Clflushopt , X86M_Only , O(660F00,AE,7,_,_,_,_,_ ), 0 , 23 , 0 , 232 , 31 , 21 ), // #68
+ INST(Clgi , X86Op , O(000F01,DD,_,_,_,_,_,_ ), 0 , 21 , 0 , 243 , 30 , 22 ), // #69
+ INST(Cli , X86Op , O(000000,FA,_,_,_,_,_,_ ), 0 , 0 , 0 , 248 , 30 , 23 ), // #70
+ INST(Clts , X86Op , O(000F00,06,_,_,_,_,_,_ ), 0 , 4 , 0 , 252 , 30 , 0 ), // #71
+ INST(Clwb , X86M_Only , O(660F00,AE,6,_,_,_,_,_ ), 0 , 24 , 0 , 257 , 31 , 24 ), // #72
+ INST(Clzero , X86Op_MemZAX , O(000F01,FC,_,_,_,_,_,_ ), 0 , 21 , 0 , 262 , 32 , 25 ), // #73
+ INST(Cmc , X86Op , O(000000,F5,_,_,_,_,_,_ ), 0 , 0 , 0 , 269 , 30 , 26 ), // #74
+ INST(Cmova , X86Rm , O(000F00,47,_,_,x,_,_,_ ), 0 , 4 , 0 , 273 , 22 , 27 ), // #75
+ INST(Cmovae , X86Rm , O(000F00,43,_,_,x,_,_,_ ), 0 , 4 , 0 , 279 , 22 , 28 ), // #76
+ INST(Cmovb , X86Rm , O(000F00,42,_,_,x,_,_,_ ), 0 , 4 , 0 , 618 , 22 , 28 ), // #77
+ INST(Cmovbe , X86Rm , O(000F00,46,_,_,x,_,_,_ ), 0 , 4 , 0 , 625 , 22 , 27 ), // #78
+ INST(Cmovc , X86Rm , O(000F00,42,_,_,x,_,_,_ ), 0 , 4 , 0 , 286 , 22 , 28 ), // #79
+ INST(Cmove , X86Rm , O(000F00,44,_,_,x,_,_,_ ), 0 , 4 , 0 , 633 , 22 , 29 ), // #80
+ INST(Cmovg , X86Rm , O(000F00,4F,_,_,x,_,_,_ ), 0 , 4 , 0 , 292 , 22 , 30 ), // #81
+ INST(Cmovge , X86Rm , O(000F00,4D,_,_,x,_,_,_ ), 0 , 4 , 0 , 298 , 22 , 31 ), // #82
+ INST(Cmovl , X86Rm , O(000F00,4C,_,_,x,_,_,_ ), 0 , 4 , 0 , 305 , 22 , 31 ), // #83
+ INST(Cmovle , X86Rm , O(000F00,4E,_,_,x,_,_,_ ), 0 , 4 , 0 , 311 , 22 , 30 ), // #84
+ INST(Cmovna , X86Rm , O(000F00,46,_,_,x,_,_,_ ), 0 , 4 , 0 , 318 , 22 , 27 ), // #85
+ INST(Cmovnae , X86Rm , O(000F00,42,_,_,x,_,_,_ ), 0 , 4 , 0 , 325 , 22 , 28 ), // #86
+ INST(Cmovnb , X86Rm , O(000F00,43,_,_,x,_,_,_ ), 0 , 4 , 0 , 640 , 22 , 28 ), // #87
+ INST(Cmovnbe , X86Rm , O(000F00,47,_,_,x,_,_,_ ), 0 , 4 , 0 , 648 , 22 , 27 ), // #88
+ INST(Cmovnc , X86Rm , O(000F00,43,_,_,x,_,_,_ ), 0 , 4 , 0 , 333 , 22 , 28 ), // #89
+ INST(Cmovne , X86Rm , O(000F00,45,_,_,x,_,_,_ ), 0 , 4 , 0 , 657 , 22 , 29 ), // #90
+ INST(Cmovng , X86Rm , O(000F00,4E,_,_,x,_,_,_ ), 0 , 4 , 0 , 340 , 22 , 30 ), // #91
+ INST(Cmovnge , X86Rm , O(000F00,4C,_,_,x,_,_,_ ), 0 , 4 , 0 , 347 , 22 , 31 ), // #92
+ INST(Cmovnl , X86Rm , O(000F00,4D,_,_,x,_,_,_ ), 0 , 4 , 0 , 355 , 22 , 31 ), // #93
+ INST(Cmovnle , X86Rm , O(000F00,4F,_,_,x,_,_,_ ), 0 , 4 , 0 , 362 , 22 , 30 ), // #94
+ INST(Cmovno , X86Rm , O(000F00,41,_,_,x,_,_,_ ), 0 , 4 , 0 , 370 , 22 , 32 ), // #95
+ INST(Cmovnp , X86Rm , O(000F00,4B,_,_,x,_,_,_ ), 0 , 4 , 0 , 377 , 22 , 33 ), // #96
+ INST(Cmovns , X86Rm , O(000F00,49,_,_,x,_,_,_ ), 0 , 4 , 0 , 384 , 22 , 34 ), // #97
+ INST(Cmovnz , X86Rm , O(000F00,45,_,_,x,_,_,_ ), 0 , 4 , 0 , 391 , 22 , 29 ), // #98
+ INST(Cmovo , X86Rm , O(000F00,40,_,_,x,_,_,_ ), 0 , 4 , 0 , 398 , 22 , 32 ), // #99
+ INST(Cmovp , X86Rm , O(000F00,4A,_,_,x,_,_,_ ), 0 , 4 , 0 , 404 , 22 , 33 ), // #100
+ INST(Cmovpe , X86Rm , O(000F00,4A,_,_,x,_,_,_ ), 0 , 4 , 0 , 410 , 22 , 33 ), // #101
+ INST(Cmovpo , X86Rm , O(000F00,4B,_,_,x,_,_,_ ), 0 , 4 , 0 , 417 , 22 , 33 ), // #102
+ INST(Cmovs , X86Rm , O(000F00,48,_,_,x,_,_,_ ), 0 , 4 , 0 , 424 , 22 , 34 ), // #103
+ INST(Cmovz , X86Rm , O(000F00,44,_,_,x,_,_,_ ), 0 , 4 , 0 , 430 , 22 , 29 ), // #104
+ INST(Cmp , X86Arith , O(000000,38,7,_,x,_,_,_ ), 0 , 25 , 0 , 436 , 33 , 1 ), // #105
+ INST(Cmppd , ExtRmi , O(660F00,C2,_,_,_,_,_,_ ), 0 , 3 , 0 , 3431 , 8 , 4 ), // #106
+ INST(Cmpps , ExtRmi , O(000F00,C2,_,_,_,_,_,_ ), 0 , 4 , 0 , 3438 , 8 , 5 ), // #107
+ INST(Cmps , X86StrMm , O(000000,A6,_,_,_,_,_,_ ), 0 , 0 , 0 , 440 , 34 , 35 ), // #108
+ INST(Cmpsd , ExtRmi , O(F20F00,C2,_,_,_,_,_,_ ), 0 , 5 , 0 , 3445 , 35 , 4 ), // #109
+ INST(Cmpss , ExtRmi , O(F30F00,C2,_,_,_,_,_,_ ), 0 , 6 , 0 , 3452 , 36 , 5 ), // #110
+ INST(Cmpxchg , X86Cmpxchg , O(000F00,B0,_,_,x,_,_,_ ), 0 , 4 , 0 , 445 , 37 , 36 ), // #111
+ INST(Cmpxchg16b , X86Cmpxchg8b_16b , O(000F00,C7,1,_,1,_,_,_ ), 0 , 26 , 0 , 453 , 38 , 37 ), // #112
+ INST(Cmpxchg8b , X86Cmpxchg8b_16b , O(000F00,C7,1,_,_,_,_,_ ), 0 , 27 , 0 , 464 , 39 , 38 ), // #113
+ INST(Comisd , ExtRm , O(660F00,2F,_,_,_,_,_,_ ), 0 , 3 , 0 , 9930 , 6 , 39 ), // #114
+ INST(Comiss , ExtRm , O(000F00,2F,_,_,_,_,_,_ ), 0 , 4 , 0 , 9939 , 7 , 40 ), // #115
+ INST(Cpuid , X86Op , O(000F00,A2,_,_,_,_,_,_ ), 0 , 4 , 0 , 474 , 40 , 41 ), // #116
+ INST(Cqo , X86Op_xDX_xAX , O(000000,99,_,_,1,_,_,_ ), 0 , 20 , 0 , 480 , 41 , 0 ), // #117
+ INST(Crc32 , X86Crc , O(F20F38,F0,_,_,x,_,_,_ ), 0 , 28 , 0 , 484 , 42 , 42 ), // #118
+ INST(Cvtdq2pd , ExtRm , O(F30F00,E6,_,_,_,_,_,_ ), 0 , 6 , 0 , 3499 , 6 , 4 ), // #119
+ INST(Cvtdq2ps , ExtRm , O(000F00,5B,_,_,_,_,_,_ ), 0 , 4 , 0 , 3509 , 5 , 4 ), // #120
+ INST(Cvtpd2dq , ExtRm , O(F20F00,E6,_,_,_,_,_,_ ), 0 , 5 , 0 , 3548 , 5 , 4 ), // #121
+ INST(Cvtpd2pi , ExtRm , O(660F00,2D,_,_,_,_,_,_ ), 0 , 3 , 0 , 490 , 43 , 4 ), // #122
+ INST(Cvtpd2ps , ExtRm , O(660F00,5A,_,_,_,_,_,_ ), 0 , 3 , 0 , 3558 , 5 , 4 ), // #123
+ INST(Cvtpi2pd , ExtRm , O(660F00,2A,_,_,_,_,_,_ ), 0 , 3 , 0 , 499 , 44 , 4 ), // #124
+ INST(Cvtpi2ps , ExtRm , O(000F00,2A,_,_,_,_,_,_ ), 0 , 4 , 0 , 508 , 44 , 5 ), // #125
+ INST(Cvtps2dq , ExtRm , O(660F00,5B,_,_,_,_,_,_ ), 0 , 3 , 0 , 3610 , 5 , 4 ), // #126
+ INST(Cvtps2pd , ExtRm , O(000F00,5A,_,_,_,_,_,_ ), 0 , 4 , 0 , 3620 , 6 , 4 ), // #127
+ INST(Cvtps2pi , ExtRm , O(000F00,2D,_,_,_,_,_,_ ), 0 , 4 , 0 , 517 , 45 , 5 ), // #128
+ INST(Cvtsd2si , ExtRm_Wx , O(F20F00,2D,_,_,x,_,_,_ ), 0 , 5 , 0 , 3692 , 46 , 4 ), // #129
+ INST(Cvtsd2ss , ExtRm , O(F20F00,5A,_,_,_,_,_,_ ), 0 , 5 , 0 , 3702 , 6 , 4 ), // #130
+ INST(Cvtsi2sd , ExtRm_Wx , O(F20F00,2A,_,_,x,_,_,_ ), 0 , 5 , 0 , 3723 , 47 , 4 ), // #131
+ INST(Cvtsi2ss , ExtRm_Wx , O(F30F00,2A,_,_,x,_,_,_ ), 0 , 6 , 0 , 3733 , 47 , 5 ), // #132
+ INST(Cvtss2sd , ExtRm , O(F30F00,5A,_,_,_,_,_,_ ), 0 , 6 , 0 , 3743 , 7 , 4 ), // #133
+ INST(Cvtss2si , ExtRm_Wx , O(F30F00,2D,_,_,x,_,_,_ ), 0 , 6 , 0 , 3753 , 48 , 5 ), // #134
+ INST(Cvttpd2dq , ExtRm , O(660F00,E6,_,_,_,_,_,_ ), 0 , 3 , 0 , 3774 , 5 , 4 ), // #135
+ INST(Cvttpd2pi , ExtRm , O(660F00,2C,_,_,_,_,_,_ ), 0 , 3 , 0 , 526 , 43 , 4 ), // #136
+ INST(Cvttps2dq , ExtRm , O(F30F00,5B,_,_,_,_,_,_ ), 0 , 6 , 0 , 3820 , 5 , 4 ), // #137
+ INST(Cvttps2pi , ExtRm , O(000F00,2C,_,_,_,_,_,_ ), 0 , 4 , 0 , 536 , 45 , 5 ), // #138
+ INST(Cvttsd2si , ExtRm_Wx , O(F20F00,2C,_,_,x,_,_,_ ), 0 , 5 , 0 , 3866 , 46 , 4 ), // #139
+ INST(Cvttss2si , ExtRm_Wx , O(F30F00,2C,_,_,x,_,_,_ ), 0 , 6 , 0 , 3889 , 48 , 5 ), // #140
+ INST(Cwd , X86Op_xDX_xAX , O(660000,99,_,_,_,_,_,_ ), 0 , 19 , 0 , 546 , 49 , 0 ), // #141
+ INST(Cwde , X86Op_xAX , O(000000,98,_,_,_,_,_,_ ), 0 , 0 , 0 , 550 , 50 , 0 ), // #142
+ INST(Daa , X86Op , O(000000,27,_,_,_,_,_,_ ), 0 , 0 , 0 , 555 , 1 , 1 ), // #143
+ INST(Das , X86Op , O(000000,2F,_,_,_,_,_,_ ), 0 , 0 , 0 , 559 , 1 , 1 ), // #144
+ INST(Dec , X86IncDec , O(000000,FE,1,_,x,_,_,_ ), O(000000,48,_,_,x,_,_,_ ), 29 , 6 , 3013 , 51 , 43 ), // #145
+ INST(Div , X86M_GPB_MulDiv , O(000000,F6,6,_,x,_,_,_ ), 0 , 30 , 0 , 780 , 52 , 1 ), // #146
+ INST(Divpd , ExtRm , O(660F00,5E,_,_,_,_,_,_ ), 0 , 3 , 0 , 3988 , 5 , 4 ), // #147
+ INST(Divps , ExtRm , O(000F00,5E,_,_,_,_,_,_ ), 0 , 4 , 0 , 3995 , 5 , 5 ), // #148
+ INST(Divsd , ExtRm , O(F20F00,5E,_,_,_,_,_,_ ), 0 , 5 , 0 , 4002 , 6 , 4 ), // #149
+ INST(Divss , ExtRm , O(F30F00,5E,_,_,_,_,_,_ ), 0 , 6 , 0 , 4009 , 7 , 5 ), // #150
+ INST(Dppd , ExtRmi , O(660F3A,41,_,_,_,_,_,_ ), 0 , 8 , 0 , 4026 , 8 , 12 ), // #151
+ INST(Dpps , ExtRmi , O(660F3A,40,_,_,_,_,_,_ ), 0 , 8 , 0 , 4032 , 8 , 12 ), // #152
+ INST(Emms , X86Op , O(000F00,77,_,_,_,_,_,_ ), 0 , 4 , 0 , 748 , 53 , 44 ), // #153
+ INST(Enqcmd , X86EnqcmdMovdir64b , O(F20F38,F8,_,_,_,_,_,_ ), 0 , 28 , 0 , 563 , 54 , 45 ), // #154
+ INST(Enqcmds , X86EnqcmdMovdir64b , O(F30F38,F8,_,_,_,_,_,_ ), 0 , 7 , 0 , 570 , 54 , 45 ), // #155
+ INST(Enter , X86Enter , O(000000,C8,_,_,_,_,_,_ ), 0 , 0 , 0 , 2856 , 55 , 0 ), // #156
+ INST(Extractps , ExtExtract , O(660F3A,17,_,_,_,_,_,_ ), 0 , 8 , 0 , 4222 , 56 , 12 ), // #157
+ INST(Extrq , ExtExtrq , O(660F00,79,_,_,_,_,_,_ ), O(660F00,78,0,_,_,_,_,_ ), 3 , 7 , 7290 , 57 , 46 ), // #158
+ INST(F2xm1 , FpuOp , O_FPU(00,D9F0,_) , 0 , 31 , 0 , 578 , 30 , 0 ), // #159
+ INST(Fabs , FpuOp , O_FPU(00,D9E1,_) , 0 , 31 , 0 , 584 , 30 , 0 ), // #160
+ INST(Fadd , FpuArith , O_FPU(00,C0C0,0) , 0 , 32 , 0 , 2067 , 58 , 0 ), // #161
+ INST(Faddp , FpuRDef , O_FPU(00,DEC0,_) , 0 , 33 , 0 , 589 , 59 , 0 ), // #162
+ INST(Fbld , X86M_Only , O_FPU(00,00DF,4) , 0 , 34 , 0 , 595 , 60 , 0 ), // #163
+ INST(Fbstp , X86M_Only , O_FPU(00,00DF,6) , 0 , 35 , 0 , 600 , 60 , 0 ), // #164
+ INST(Fchs , FpuOp , O_FPU(00,D9E0,_) , 0 , 31 , 0 , 606 , 30 , 0 ), // #165
+ INST(Fclex , FpuOp , O_FPU(9B,DBE2,_) , 0 , 36 , 0 , 611 , 30 , 0 ), // #166
+ INST(Fcmovb , FpuR , O_FPU(00,DAC0,_) , 0 , 37 , 0 , 617 , 61 , 28 ), // #167
+ INST(Fcmovbe , FpuR , O_FPU(00,DAD0,_) , 0 , 37 , 0 , 624 , 61 , 27 ), // #168
+ INST(Fcmove , FpuR , O_FPU(00,DAC8,_) , 0 , 37 , 0 , 632 , 61 , 29 ), // #169
+ INST(Fcmovnb , FpuR , O_FPU(00,DBC0,_) , 0 , 38 , 0 , 639 , 61 , 28 ), // #170
+ INST(Fcmovnbe , FpuR , O_FPU(00,DBD0,_) , 0 , 38 , 0 , 647 , 61 , 27 ), // #171
+ INST(Fcmovne , FpuR , O_FPU(00,DBC8,_) , 0 , 38 , 0 , 656 , 61 , 29 ), // #172
+ INST(Fcmovnu , FpuR , O_FPU(00,DBD8,_) , 0 , 38 , 0 , 664 , 61 , 33 ), // #173
+ INST(Fcmovu , FpuR , O_FPU(00,DAD8,_) , 0 , 37 , 0 , 672 , 61 , 33 ), // #174
+ INST(Fcom , FpuCom , O_FPU(00,D0D0,2) , 0 , 39 , 0 , 679 , 62 , 0 ), // #175
+ INST(Fcomi , FpuR , O_FPU(00,DBF0,_) , 0 , 38 , 0 , 684 , 61 , 47 ), // #176
+ INST(Fcomip , FpuR , O_FPU(00,DFF0,_) , 0 , 40 , 0 , 690 , 61 , 47 ), // #177
+ INST(Fcomp , FpuCom , O_FPU(00,D8D8,3) , 0 , 41 , 0 , 697 , 62 , 0 ), // #178
+ INST(Fcompp , FpuOp , O_FPU(00,DED9,_) , 0 , 33 , 0 , 703 , 30 , 0 ), // #179
+ INST(Fcos , FpuOp , O_FPU(00,D9FF,_) , 0 , 31 , 0 , 710 , 30 , 0 ), // #180
+ INST(Fdecstp , FpuOp , O_FPU(00,D9F6,_) , 0 , 31 , 0 , 715 , 30 , 0 ), // #181
+ INST(Fdiv , FpuArith , O_FPU(00,F0F8,6) , 0 , 42 , 0 , 723 , 58 , 0 ), // #182
+ INST(Fdivp , FpuRDef , O_FPU(00,DEF8,_) , 0 , 33 , 0 , 728 , 59 , 0 ), // #183
+ INST(Fdivr , FpuArith , O_FPU(00,F8F0,7) , 0 , 43 , 0 , 734 , 58 , 0 ), // #184
+ INST(Fdivrp , FpuRDef , O_FPU(00,DEF0,_) , 0 , 33 , 0 , 740 , 59 , 0 ), // #185
+ INST(Femms , X86Op , O(000F00,0E,_,_,_,_,_,_ ), 0 , 4 , 0 , 747 , 30 , 48 ), // #186
+ INST(Ffree , FpuR , O_FPU(00,DDC0,_) , 0 , 44 , 0 , 753 , 61 , 0 ), // #187
+ INST(Fiadd , FpuM , O_FPU(00,00DA,0) , 0 , 45 , 0 , 759 , 63 , 0 ), // #188
+ INST(Ficom , FpuM , O_FPU(00,00DA,2) , 0 , 46 , 0 , 765 , 63 , 0 ), // #189
+ INST(Ficomp , FpuM , O_FPU(00,00DA,3) , 0 , 47 , 0 , 771 , 63 , 0 ), // #190
+ INST(Fidiv , FpuM , O_FPU(00,00DA,6) , 0 , 35 , 0 , 778 , 63 , 0 ), // #191
+ INST(Fidivr , FpuM , O_FPU(00,00DA,7) , 0 , 48 , 0 , 784 , 63 , 0 ), // #192
+ INST(Fild , FpuM , O_FPU(00,00DB,0) , O_FPU(00,00DF,5) , 45 , 8 , 791 , 64 , 0 ), // #193
+ INST(Fimul , FpuM , O_FPU(00,00DA,1) , 0 , 49 , 0 , 796 , 63 , 0 ), // #194
+ INST(Fincstp , FpuOp , O_FPU(00,D9F7,_) , 0 , 31 , 0 , 802 , 30 , 0 ), // #195
+ INST(Finit , FpuOp , O_FPU(9B,DBE3,_) , 0 , 36 , 0 , 810 , 30 , 0 ), // #196
+ INST(Fist , FpuM , O_FPU(00,00DB,2) , 0 , 46 , 0 , 816 , 63 , 0 ), // #197
+ INST(Fistp , FpuM , O_FPU(00,00DB,3) , O_FPU(00,00DF,7) , 47 , 9 , 821 , 64 , 0 ), // #198
+ INST(Fisttp , FpuM , O_FPU(00,00DB,1) , O_FPU(00,00DD,1) , 49 , 10 , 827 , 64 , 6 ), // #199
+ INST(Fisub , FpuM , O_FPU(00,00DA,4) , 0 , 34 , 0 , 834 , 63 , 0 ), // #200
+ INST(Fisubr , FpuM , O_FPU(00,00DA,5) , 0 , 50 , 0 , 840 , 63 , 0 ), // #201
+ INST(Fld , FpuFldFst , O_FPU(00,00D9,0) , O_FPU(00,00DB,5) , 45 , 11 , 847 , 65 , 0 ), // #202
+ INST(Fld1 , FpuOp , O_FPU(00,D9E8,_) , 0 , 31 , 0 , 851 , 30 , 0 ), // #203
+ INST(Fldcw , X86M_Only , O_FPU(00,00D9,5) , 0 , 50 , 0 , 856 , 66 , 0 ), // #204
+ INST(Fldenv , X86M_Only , O_FPU(00,00D9,4) , 0 , 34 , 0 , 862 , 31 , 0 ), // #205
+ INST(Fldl2e , FpuOp , O_FPU(00,D9EA,_) , 0 , 31 , 0 , 869 , 30 , 0 ), // #206
+ INST(Fldl2t , FpuOp , O_FPU(00,D9E9,_) , 0 , 31 , 0 , 876 , 30 , 0 ), // #207
+ INST(Fldlg2 , FpuOp , O_FPU(00,D9EC,_) , 0 , 31 , 0 , 883 , 30 , 0 ), // #208
+ INST(Fldln2 , FpuOp , O_FPU(00,D9ED,_) , 0 , 31 , 0 , 890 , 30 , 0 ), // #209
+ INST(Fldpi , FpuOp , O_FPU(00,D9EB,_) , 0 , 31 , 0 , 897 , 30 , 0 ), // #210
+ INST(Fldz , FpuOp , O_FPU(00,D9EE,_) , 0 , 31 , 0 , 903 , 30 , 0 ), // #211
+ INST(Fmul , FpuArith , O_FPU(00,C8C8,1) , 0 , 51 , 0 , 2109 , 58 , 0 ), // #212
+ INST(Fmulp , FpuRDef , O_FPU(00,DEC8,_) , 0 , 33 , 0 , 908 , 59 , 0 ), // #213
+ INST(Fnclex , FpuOp , O_FPU(00,DBE2,_) , 0 , 38 , 0 , 914 , 30 , 0 ), // #214
+ INST(Fninit , FpuOp , O_FPU(00,DBE3,_) , 0 , 38 , 0 , 921 , 30 , 0 ), // #215
+ INST(Fnop , FpuOp , O_FPU(00,D9D0,_) , 0 , 31 , 0 , 928 , 30 , 0 ), // #216
+ INST(Fnsave , X86M_Only , O_FPU(00,00DD,6) , 0 , 35 , 0 , 933 , 31 , 0 ), // #217
+ INST(Fnstcw , X86M_Only , O_FPU(00,00D9,7) , 0 , 48 , 0 , 940 , 66 , 0 ), // #218
+ INST(Fnstenv , X86M_Only , O_FPU(00,00D9,6) , 0 , 35 , 0 , 947 , 31 , 0 ), // #219
+ INST(Fnstsw , FpuStsw , O_FPU(00,00DD,7) , O_FPU(00,DFE0,_) , 48 , 12 , 955 , 67 , 0 ), // #220
+ INST(Fpatan , FpuOp , O_FPU(00,D9F3,_) , 0 , 31 , 0 , 962 , 30 , 0 ), // #221
+ INST(Fprem , FpuOp , O_FPU(00,D9F8,_) , 0 , 31 , 0 , 969 , 30 , 0 ), // #222
+ INST(Fprem1 , FpuOp , O_FPU(00,D9F5,_) , 0 , 31 , 0 , 975 , 30 , 0 ), // #223
+ INST(Fptan , FpuOp , O_FPU(00,D9F2,_) , 0 , 31 , 0 , 982 , 30 , 0 ), // #224
+ INST(Frndint , FpuOp , O_FPU(00,D9FC,_) , 0 , 31 , 0 , 988 , 30 , 0 ), // #225
+ INST(Frstor , X86M_Only , O_FPU(00,00DD,4) , 0 , 34 , 0 , 996 , 31 , 0 ), // #226
+ INST(Fsave , X86M_Only , O_FPU(9B,00DD,6) , 0 , 52 , 0 , 1003 , 31 , 0 ), // #227
+ INST(Fscale , FpuOp , O_FPU(00,D9FD,_) , 0 , 31 , 0 , 1009 , 30 , 0 ), // #228
+ INST(Fsin , FpuOp , O_FPU(00,D9FE,_) , 0 , 31 , 0 , 1016 , 30 , 0 ), // #229
+ INST(Fsincos , FpuOp , O_FPU(00,D9FB,_) , 0 , 31 , 0 , 1021 , 30 , 0 ), // #230
+ INST(Fsqrt , FpuOp , O_FPU(00,D9FA,_) , 0 , 31 , 0 , 1029 , 30 , 0 ), // #231
+ INST(Fst , FpuFldFst , O_FPU(00,00D9,2) , 0 , 46 , 0 , 1035 , 68 , 0 ), // #232
+ INST(Fstcw , X86M_Only , O_FPU(9B,00D9,7) , 0 , 53 , 0 , 1039 , 66 , 0 ), // #233
+ INST(Fstenv , X86M_Only , O_FPU(9B,00D9,6) , 0 , 52 , 0 , 1045 , 31 , 0 ), // #234
+ INST(Fstp , FpuFldFst , O_FPU(00,00D9,3) , O(000000,DB,7,_,_,_,_,_ ), 47 , 13 , 1052 , 65 , 0 ), // #235
+ INST(Fstsw , FpuStsw , O_FPU(9B,00DD,7) , O_FPU(9B,DFE0,_) , 53 , 14 , 1057 , 67 , 0 ), // #236
+ INST(Fsub , FpuArith , O_FPU(00,E0E8,4) , 0 , 54 , 0 , 2187 , 58 , 0 ), // #237
+ INST(Fsubp , FpuRDef , O_FPU(00,DEE8,_) , 0 , 33 , 0 , 1063 , 59 , 0 ), // #238
+ INST(Fsubr , FpuArith , O_FPU(00,E8E0,5) , 0 , 55 , 0 , 2193 , 58 , 0 ), // #239
+ INST(Fsubrp , FpuRDef , O_FPU(00,DEE0,_) , 0 , 33 , 0 , 1069 , 59 , 0 ), // #240
+ INST(Ftst , FpuOp , O_FPU(00,D9E4,_) , 0 , 31 , 0 , 1076 , 30 , 0 ), // #241
+ INST(Fucom , FpuRDef , O_FPU(00,DDE0,_) , 0 , 44 , 0 , 1081 , 59 , 0 ), // #242
+ INST(Fucomi , FpuR , O_FPU(00,DBE8,_) , 0 , 38 , 0 , 1087 , 61 , 47 ), // #243
+ INST(Fucomip , FpuR , O_FPU(00,DFE8,_) , 0 , 40 , 0 , 1094 , 61 , 47 ), // #244
+ INST(Fucomp , FpuRDef , O_FPU(00,DDE8,_) , 0 , 44 , 0 , 1102 , 59 , 0 ), // #245
+ INST(Fucompp , FpuOp , O_FPU(00,DAE9,_) , 0 , 37 , 0 , 1109 , 30 , 0 ), // #246
+ INST(Fwait , X86Op , O_FPU(00,009B,_) , 0 , 56 , 0 , 1117 , 30 , 0 ), // #247
+ INST(Fxam , FpuOp , O_FPU(00,D9E5,_) , 0 , 31 , 0 , 1123 , 30 , 0 ), // #248
+ INST(Fxch , FpuR , O_FPU(00,D9C8,_) , 0 , 31 , 0 , 1128 , 59 , 0 ), // #249
+ INST(Fxrstor , X86M_Only , O(000F00,AE,1,_,_,_,_,_ ), 0 , 27 , 0 , 1133 , 31 , 49 ), // #250
+ INST(Fxrstor64 , X86M_Only , O(000F00,AE,1,_,1,_,_,_ ), 0 , 26 , 0 , 1141 , 69 , 49 ), // #251
+ INST(Fxsave , X86M_Only , O(000F00,AE,0,_,_,_,_,_ ), 0 , 4 , 0 , 1151 , 31 , 49 ), // #252
+ INST(Fxsave64 , X86M_Only , O(000F00,AE,0,_,1,_,_,_ ), 0 , 57 , 0 , 1158 , 69 , 49 ), // #253
+ INST(Fxtract , FpuOp , O_FPU(00,D9F4,_) , 0 , 31 , 0 , 1167 , 30 , 0 ), // #254
+ INST(Fyl2x , FpuOp , O_FPU(00,D9F1,_) , 0 , 31 , 0 , 1175 , 30 , 0 ), // #255
+ INST(Fyl2xp1 , FpuOp , O_FPU(00,D9F9,_) , 0 , 31 , 0 , 1181 , 30 , 0 ), // #256
+ INST(Getsec , X86Op , O(000F00,37,_,_,_,_,_,_ ), 0 , 4 , 0 , 1189 , 30 , 50 ), // #257
+ INST(Gf2p8affineinvqb , ExtRmi , O(660F3A,CF,_,_,_,_,_,_ ), 0 , 8 , 0 , 5577 , 8 , 51 ), // #258
+ INST(Gf2p8affineqb , ExtRmi , O(660F3A,CE,_,_,_,_,_,_ ), 0 , 8 , 0 , 5595 , 8 , 51 ), // #259
+ INST(Gf2p8mulb , ExtRm , O(660F38,CF,_,_,_,_,_,_ ), 0 , 2 , 0 , 5610 , 5 , 51 ), // #260
+ INST(Haddpd , ExtRm , O(660F00,7C,_,_,_,_,_,_ ), 0 , 3 , 0 , 5621 , 5 , 6 ), // #261
+ INST(Haddps , ExtRm , O(F20F00,7C,_,_,_,_,_,_ ), 0 , 5 , 0 , 5629 , 5 , 6 ), // #262
+ INST(Hlt , X86Op , O(000000,F4,_,_,_,_,_,_ ), 0 , 0 , 0 , 1196 , 30 , 0 ), // #263
+ INST(Hsubpd , ExtRm , O(660F00,7D,_,_,_,_,_,_ ), 0 , 3 , 0 , 5637 , 5 , 6 ), // #264
+ INST(Hsubps , ExtRm , O(F20F00,7D,_,_,_,_,_,_ ), 0 , 5 , 0 , 5645 , 5 , 6 ), // #265
+ INST(Idiv , X86M_GPB_MulDiv , O(000000,F6,7,_,x,_,_,_ ), 0 , 25 , 0 , 779 , 52 , 1 ), // #266
+ INST(Imul , X86Imul , O(000000,F6,5,_,x,_,_,_ ), 0 , 58 , 0 , 797 , 70 , 1 ), // #267
+ INST(In , X86In , O(000000,EC,_,_,_,_,_,_ ), O(000000,E4,_,_,_,_,_,_ ), 0 , 15 , 10076, 71 , 0 ), // #268
+ INST(Inc , X86IncDec , O(000000,FE,0,_,x,_,_,_ ), O(000000,40,_,_,x,_,_,_ ), 0 , 16 , 1200 , 51 , 43 ), // #269
+ INST(Ins , X86Ins , O(000000,6C,_,_,_,_,_,_ ), 0 , 0 , 0 , 1857 , 72 , 0 ), // #270
+ INST(Insertps , ExtRmi , O(660F3A,21,_,_,_,_,_,_ ), 0 , 8 , 0 , 5781 , 36 , 12 ), // #271
+ INST(Insertq , ExtInsertq , O(F20F00,79,_,_,_,_,_,_ ), O(F20F00,78,_,_,_,_,_,_ ), 5 , 17 , 1204 , 73 , 46 ), // #272
+ INST(Int , X86Int , O(000000,CD,_,_,_,_,_,_ ), 0 , 0 , 0 , 992 , 74 , 0 ), // #273
+ INST(Int3 , X86Op , O(000000,CC,_,_,_,_,_,_ ), 0 , 0 , 0 , 1212 , 30 , 0 ), // #274
+ INST(Into , X86Op , O(000000,CE,_,_,_,_,_,_ ), 0 , 0 , 0 , 1217 , 75 , 52 ), // #275
+ INST(Invd , X86Op , O(000F00,08,_,_,_,_,_,_ ), 0 , 4 , 0 , 10031, 30 , 41 ), // #276
+ INST(Invept , X86Rm_NoSize , O(660F38,80,_,_,_,_,_,_ ), 0 , 2 , 0 , 1222 , 76 , 53 ), // #277
+ INST(Invlpg , X86M_Only , O(000F00,01,7,_,_,_,_,_ ), 0 , 22 , 0 , 1229 , 31 , 41 ), // #278
+ INST(Invlpga , X86Op_xAddr , O(000F01,DF,_,_,_,_,_,_ ), 0 , 21 , 0 , 1236 , 77 , 22 ), // #279
+ INST(Invpcid , X86Rm_NoSize , O(660F38,82,_,_,_,_,_,_ ), 0 , 2 , 0 , 1244 , 76 , 41 ), // #280
+ INST(Invvpid , X86Rm_NoSize , O(660F38,81,_,_,_,_,_,_ ), 0 , 2 , 0 , 1252 , 76 , 53 ), // #281
+ INST(Iret , X86Op , O(000000,CF,_,_,_,_,_,_ ), 0 , 0 , 0 , 1260 , 78 , 1 ), // #282
+ INST(Iretd , X86Op , O(000000,CF,_,_,_,_,_,_ ), 0 , 0 , 0 , 1265 , 78 , 1 ), // #283
+ INST(Iretq , X86Op , O(000000,CF,_,_,1,_,_,_ ), 0 , 20 , 0 , 1271 , 79 , 1 ), // #284
+ INST(Iretw , X86Op , O(660000,CF,_,_,_,_,_,_ ), 0 , 19 , 0 , 1277 , 78 , 1 ), // #285
+ INST(Ja , X86Jcc , O(000F00,87,_,_,_,_,_,_ ), O(000000,77,_,_,_,_,_,_ ), 4 , 18 , 1283 , 80 , 54 ), // #286
+ INST(Jae , X86Jcc , O(000F00,83,_,_,_,_,_,_ ), O(000000,73,_,_,_,_,_,_ ), 4 , 19 , 1286 , 80 , 55 ), // #287
+ INST(Jb , X86Jcc , O(000F00,82,_,_,_,_,_,_ ), O(000000,72,_,_,_,_,_,_ ), 4 , 20 , 1290 , 80 , 55 ), // #288
+ INST(Jbe , X86Jcc , O(000F00,86,_,_,_,_,_,_ ), O(000000,76,_,_,_,_,_,_ ), 4 , 21 , 1293 , 80 , 54 ), // #289
+ INST(Jc , X86Jcc , O(000F00,82,_,_,_,_,_,_ ), O(000000,72,_,_,_,_,_,_ ), 4 , 20 , 1297 , 80 , 55 ), // #290
+ INST(Je , X86Jcc , O(000F00,84,_,_,_,_,_,_ ), O(000000,74,_,_,_,_,_,_ ), 4 , 22 , 1300 , 80 , 56 ), // #291
+ INST(Jecxz , X86JecxzLoop , 0 , O(000000,E3,_,_,_,_,_,_ ), 0 , 23 , 1303 , 81 , 0 ), // #292
+ INST(Jg , X86Jcc , O(000F00,8F,_,_,_,_,_,_ ), O(000000,7F,_,_,_,_,_,_ ), 4 , 24 , 1309 , 80 , 57 ), // #293
+ INST(Jge , X86Jcc , O(000F00,8D,_,_,_,_,_,_ ), O(000000,7D,_,_,_,_,_,_ ), 4 , 25 , 1312 , 80 , 58 ), // #294
+ INST(Jl , X86Jcc , O(000F00,8C,_,_,_,_,_,_ ), O(000000,7C,_,_,_,_,_,_ ), 4 , 26 , 1316 , 80 , 58 ), // #295
+ INST(Jle , X86Jcc , O(000F00,8E,_,_,_,_,_,_ ), O(000000,7E,_,_,_,_,_,_ ), 4 , 27 , 1319 , 80 , 57 ), // #296
+ INST(Jmp , X86Jmp , O(000000,FF,4,_,_,_,_,_ ), O(000000,EB,_,_,_,_,_,_ ), 9 , 28 , 1323 , 82 , 0 ), // #297
+ INST(Jna , X86Jcc , O(000F00,86,_,_,_,_,_,_ ), O(000000,76,_,_,_,_,_,_ ), 4 , 21 , 1327 , 80 , 54 ), // #298
+ INST(Jnae , X86Jcc , O(000F00,82,_,_,_,_,_,_ ), O(000000,72,_,_,_,_,_,_ ), 4 , 20 , 1331 , 80 , 55 ), // #299
+ INST(Jnb , X86Jcc , O(000F00,83,_,_,_,_,_,_ ), O(000000,73,_,_,_,_,_,_ ), 4 , 19 , 1336 , 80 , 55 ), // #300
+ INST(Jnbe , X86Jcc , O(000F00,87,_,_,_,_,_,_ ), O(000000,77,_,_,_,_,_,_ ), 4 , 18 , 1340 , 80 , 54 ), // #301
+ INST(Jnc , X86Jcc , O(000F00,83,_,_,_,_,_,_ ), O(000000,73,_,_,_,_,_,_ ), 4 , 19 , 1345 , 80 , 55 ), // #302
+ INST(Jne , X86Jcc , O(000F00,85,_,_,_,_,_,_ ), O(000000,75,_,_,_,_,_,_ ), 4 , 29 , 1349 , 80 , 56 ), // #303
+ INST(Jng , X86Jcc , O(000F00,8E,_,_,_,_,_,_ ), O(000000,7E,_,_,_,_,_,_ ), 4 , 27 , 1353 , 80 , 57 ), // #304
+ INST(Jnge , X86Jcc , O(000F00,8C,_,_,_,_,_,_ ), O(000000,7C,_,_,_,_,_,_ ), 4 , 26 , 1357 , 80 , 58 ), // #305
+ INST(Jnl , X86Jcc , O(000F00,8D,_,_,_,_,_,_ ), O(000000,7D,_,_,_,_,_,_ ), 4 , 25 , 1362 , 80 , 58 ), // #306
+ INST(Jnle , X86Jcc , O(000F00,8F,_,_,_,_,_,_ ), O(000000,7F,_,_,_,_,_,_ ), 4 , 24 , 1366 , 80 , 57 ), // #307
+ INST(Jno , X86Jcc , O(000F00,81,_,_,_,_,_,_ ), O(000000,71,_,_,_,_,_,_ ), 4 , 30 , 1371 , 80 , 52 ), // #308
+ INST(Jnp , X86Jcc , O(000F00,8B,_,_,_,_,_,_ ), O(000000,7B,_,_,_,_,_,_ ), 4 , 31 , 1375 , 80 , 59 ), // #309
+ INST(Jns , X86Jcc , O(000F00,89,_,_,_,_,_,_ ), O(000000,79,_,_,_,_,_,_ ), 4 , 32 , 1379 , 80 , 60 ), // #310
+ INST(Jnz , X86Jcc , O(000F00,85,_,_,_,_,_,_ ), O(000000,75,_,_,_,_,_,_ ), 4 , 29 , 1383 , 80 , 56 ), // #311
+ INST(Jo , X86Jcc , O(000F00,80,_,_,_,_,_,_ ), O(000000,70,_,_,_,_,_,_ ), 4 , 33 , 1387 , 80 , 52 ), // #312
+ INST(Jp , X86Jcc , O(000F00,8A,_,_,_,_,_,_ ), O(000000,7A,_,_,_,_,_,_ ), 4 , 34 , 1390 , 80 , 59 ), // #313
+ INST(Jpe , X86Jcc , O(000F00,8A,_,_,_,_,_,_ ), O(000000,7A,_,_,_,_,_,_ ), 4 , 34 , 1393 , 80 , 59 ), // #314
+ INST(Jpo , X86Jcc , O(000F00,8B,_,_,_,_,_,_ ), O(000000,7B,_,_,_,_,_,_ ), 4 , 31 , 1397 , 80 , 59 ), // #315
+ INST(Js , X86Jcc , O(000F00,88,_,_,_,_,_,_ ), O(000000,78,_,_,_,_,_,_ ), 4 , 35 , 1401 , 80 , 60 ), // #316
+ INST(Jz , X86Jcc , O(000F00,84,_,_,_,_,_,_ ), O(000000,74,_,_,_,_,_,_ ), 4 , 22 , 1404 , 80 , 56 ), // #317
+ INST(Kaddb , VexRvm , V(660F00,4A,_,1,0,_,_,_ ), 0 , 59 , 0 , 1407 , 83 , 61 ), // #318
+ INST(Kaddd , VexRvm , V(660F00,4A,_,1,1,_,_,_ ), 0 , 60 , 0 , 1413 , 83 , 62 ), // #319
+ INST(Kaddq , VexRvm , V(000F00,4A,_,1,1,_,_,_ ), 0 , 61 , 0 , 1419 , 83 , 62 ), // #320
+ INST(Kaddw , VexRvm , V(000F00,4A,_,1,0,_,_,_ ), 0 , 62 , 0 , 1425 , 83 , 61 ), // #321
+ INST(Kandb , VexRvm , V(660F00,41,_,1,0,_,_,_ ), 0 , 59 , 0 , 1431 , 83 , 61 ), // #322
+ INST(Kandd , VexRvm , V(660F00,41,_,1,1,_,_,_ ), 0 , 60 , 0 , 1437 , 83 , 62 ), // #323
+ INST(Kandnb , VexRvm , V(660F00,42,_,1,0,_,_,_ ), 0 , 59 , 0 , 1443 , 83 , 61 ), // #324
+ INST(Kandnd , VexRvm , V(660F00,42,_,1,1,_,_,_ ), 0 , 60 , 0 , 1450 , 83 , 62 ), // #325
+ INST(Kandnq , VexRvm , V(000F00,42,_,1,1,_,_,_ ), 0 , 61 , 0 , 1457 , 83 , 62 ), // #326
+ INST(Kandnw , VexRvm , V(000F00,42,_,1,0,_,_,_ ), 0 , 62 , 0 , 1464 , 83 , 63 ), // #327
+ INST(Kandq , VexRvm , V(000F00,41,_,1,1,_,_,_ ), 0 , 61 , 0 , 1471 , 83 , 62 ), // #328
+ INST(Kandw , VexRvm , V(000F00,41,_,1,0,_,_,_ ), 0 , 62 , 0 , 1477 , 83 , 63 ), // #329
+ INST(Kmovb , VexKmov , V(660F00,90,_,0,0,_,_,_ ), V(660F00,92,_,0,0,_,_,_ ), 63 , 36 , 1483 , 84 , 61 ), // #330
+ INST(Kmovd , VexKmov , V(660F00,90,_,0,1,_,_,_ ), V(F20F00,92,_,0,0,_,_,_ ), 64 , 37 , 7770 , 85 , 62 ), // #331
+ INST(Kmovq , VexKmov , V(000F00,90,_,0,1,_,_,_ ), V(F20F00,92,_,0,1,_,_,_ ), 65 , 38 , 7781 , 86 , 62 ), // #332
+ INST(Kmovw , VexKmov , V(000F00,90,_,0,0,_,_,_ ), V(000F00,92,_,0,0,_,_,_ ), 66 , 39 , 1489 , 87 , 63 ), // #333
+ INST(Knotb , VexRm , V(660F00,44,_,0,0,_,_,_ ), 0 , 63 , 0 , 1495 , 88 , 61 ), // #334
+ INST(Knotd , VexRm , V(660F00,44,_,0,1,_,_,_ ), 0 , 64 , 0 , 1501 , 88 , 62 ), // #335
+ INST(Knotq , VexRm , V(000F00,44,_,0,1,_,_,_ ), 0 , 65 , 0 , 1507 , 88 , 62 ), // #336
+ INST(Knotw , VexRm , V(000F00,44,_,0,0,_,_,_ ), 0 , 66 , 0 , 1513 , 88 , 63 ), // #337
+ INST(Korb , VexRvm , V(660F00,45,_,1,0,_,_,_ ), 0 , 59 , 0 , 1519 , 83 , 61 ), // #338
+ INST(Kord , VexRvm , V(660F00,45,_,1,1,_,_,_ ), 0 , 60 , 0 , 1524 , 83 , 62 ), // #339
+ INST(Korq , VexRvm , V(000F00,45,_,1,1,_,_,_ ), 0 , 61 , 0 , 1529 , 83 , 62 ), // #340
+ INST(Kortestb , VexRm , V(660F00,98,_,0,0,_,_,_ ), 0 , 63 , 0 , 1534 , 88 , 64 ), // #341
+ INST(Kortestd , VexRm , V(660F00,98,_,0,1,_,_,_ ), 0 , 64 , 0 , 1543 , 88 , 65 ), // #342
+ INST(Kortestq , VexRm , V(000F00,98,_,0,1,_,_,_ ), 0 , 65 , 0 , 1552 , 88 , 65 ), // #343
+ INST(Kortestw , VexRm , V(000F00,98,_,0,0,_,_,_ ), 0 , 66 , 0 , 1561 , 88 , 66 ), // #344
+ INST(Korw , VexRvm , V(000F00,45,_,1,0,_,_,_ ), 0 , 62 , 0 , 1570 , 83 , 63 ), // #345
+ INST(Kshiftlb , VexRmi , V(660F3A,32,_,0,0,_,_,_ ), 0 , 67 , 0 , 1575 , 89 , 61 ), // #346
+ INST(Kshiftld , VexRmi , V(660F3A,33,_,0,0,_,_,_ ), 0 , 67 , 0 , 1584 , 89 , 62 ), // #347
+ INST(Kshiftlq , VexRmi , V(660F3A,33,_,0,1,_,_,_ ), 0 , 68 , 0 , 1593 , 89 , 62 ), // #348
+ INST(Kshiftlw , VexRmi , V(660F3A,32,_,0,1,_,_,_ ), 0 , 68 , 0 , 1602 , 89 , 63 ), // #349
+ INST(Kshiftrb , VexRmi , V(660F3A,30,_,0,0,_,_,_ ), 0 , 67 , 0 , 1611 , 89 , 61 ), // #350
+ INST(Kshiftrd , VexRmi , V(660F3A,31,_,0,0,_,_,_ ), 0 , 67 , 0 , 1620 , 89 , 62 ), // #351
+ INST(Kshiftrq , VexRmi , V(660F3A,31,_,0,1,_,_,_ ), 0 , 68 , 0 , 1629 , 89 , 62 ), // #352
+ INST(Kshiftrw , VexRmi , V(660F3A,30,_,0,1,_,_,_ ), 0 , 68 , 0 , 1638 , 89 , 63 ), // #353
+ INST(Ktestb , VexRm , V(660F00,99,_,0,0,_,_,_ ), 0 , 63 , 0 , 1647 , 88 , 64 ), // #354
+ INST(Ktestd , VexRm , V(660F00,99,_,0,1,_,_,_ ), 0 , 64 , 0 , 1654 , 88 , 65 ), // #355
+ INST(Ktestq , VexRm , V(000F00,99,_,0,1,_,_,_ ), 0 , 65 , 0 , 1661 , 88 , 65 ), // #356
+ INST(Ktestw , VexRm , V(000F00,99,_,0,0,_,_,_ ), 0 , 66 , 0 , 1668 , 88 , 64 ), // #357
+ INST(Kunpckbw , VexRvm , V(660F00,4B,_,1,0,_,_,_ ), 0 , 59 , 0 , 1675 , 83 , 63 ), // #358
+ INST(Kunpckdq , VexRvm , V(000F00,4B,_,1,1,_,_,_ ), 0 , 61 , 0 , 1684 , 83 , 62 ), // #359
+ INST(Kunpckwd , VexRvm , V(000F00,4B,_,1,0,_,_,_ ), 0 , 62 , 0 , 1693 , 83 , 62 ), // #360
+ INST(Kxnorb , VexRvm , V(660F00,46,_,1,0,_,_,_ ), 0 , 59 , 0 , 1702 , 83 , 61 ), // #361
+ INST(Kxnord , VexRvm , V(660F00,46,_,1,1,_,_,_ ), 0 , 60 , 0 , 1709 , 83 , 62 ), // #362
+ INST(Kxnorq , VexRvm , V(000F00,46,_,1,1,_,_,_ ), 0 , 61 , 0 , 1716 , 83 , 62 ), // #363
+ INST(Kxnorw , VexRvm , V(000F00,46,_,1,0,_,_,_ ), 0 , 62 , 0 , 1723 , 83 , 63 ), // #364
+ INST(Kxorb , VexRvm , V(660F00,47,_,1,0,_,_,_ ), 0 , 59 , 0 , 1730 , 83 , 61 ), // #365
+ INST(Kxord , VexRvm , V(660F00,47,_,1,1,_,_,_ ), 0 , 60 , 0 , 1736 , 83 , 62 ), // #366
+ INST(Kxorq , VexRvm , V(000F00,47,_,1,1,_,_,_ ), 0 , 61 , 0 , 1742 , 83 , 62 ), // #367
+ INST(Kxorw , VexRvm , V(000F00,47,_,1,0,_,_,_ ), 0 , 62 , 0 , 1748 , 83 , 63 ), // #368
+ INST(Lahf , X86Op , O(000000,9F,_,_,_,_,_,_ ), 0 , 0 , 0 , 1754 , 90 , 67 ), // #369
+ INST(Lar , X86Rm , O(000F00,02,_,_,_,_,_,_ ), 0 , 4 , 0 , 1759 , 91 , 10 ), // #370
+ INST(Lddqu , ExtRm , O(F20F00,F0,_,_,_,_,_,_ ), 0 , 5 , 0 , 5791 , 92 , 6 ), // #371
+ INST(Ldmxcsr , X86M_Only , O(000F00,AE,2,_,_,_,_,_ ), 0 , 69 , 0 , 5798 , 93 , 5 ), // #372
+ INST(Lds , X86Rm , O(000000,C5,_,_,_,_,_,_ ), 0 , 0 , 0 , 1763 , 94 , 0 ), // #373
+ INST(Lea , X86Lea , O(000000,8D,_,_,x,_,_,_ ), 0 , 0 , 0 , 1767 , 95 , 0 ), // #374
+ INST(Leave , X86Op , O(000000,C9,_,_,_,_,_,_ ), 0 , 0 , 0 , 1771 , 30 , 0 ), // #375
+ INST(Les , X86Rm , O(000000,C4,_,_,_,_,_,_ ), 0 , 0 , 0 , 1777 , 94 , 0 ), // #376
+ INST(Lfence , X86Fence , O(000F00,AE,5,_,_,_,_,_ ), 0 , 70 , 0 , 1781 , 30 , 4 ), // #377
+ INST(Lfs , X86Rm , O(000F00,B4,_,_,_,_,_,_ ), 0 , 4 , 0 , 1788 , 96 , 0 ), // #378
+ INST(Lgdt , X86M_Only , O(000F00,01,2,_,_,_,_,_ ), 0 , 69 , 0 , 1792 , 31 , 0 ), // #379
+ INST(Lgs , X86Rm , O(000F00,B5,_,_,_,_,_,_ ), 0 , 4 , 0 , 1797 , 96 , 0 ), // #380
+ INST(Lidt , X86M_Only , O(000F00,01,3,_,_,_,_,_ ), 0 , 71 , 0 , 1801 , 31 , 0 ), // #381
+ INST(Lldt , X86M_NoSize , O(000F00,00,2,_,_,_,_,_ ), 0 , 69 , 0 , 1806 , 97 , 0 ), // #382
+ INST(Llwpcb , VexR_Wx , V(XOP_M9,12,0,0,x,_,_,_ ), 0 , 72 , 0 , 1811 , 98 , 68 ), // #383
+ INST(Lmsw , X86M_NoSize , O(000F00,01,6,_,_,_,_,_ ), 0 , 73 , 0 , 1818 , 97 , 0 ), // #384
+ INST(Lods , X86StrRm , O(000000,AC,_,_,_,_,_,_ ), 0 , 0 , 0 , 1823 , 99 , 69 ), // #385
+ INST(Loop , X86JecxzLoop , 0 , O(000000,E2,_,_,_,_,_,_ ), 0 , 40 , 1828 , 100, 0 ), // #386
+ INST(Loope , X86JecxzLoop , 0 , O(000000,E1,_,_,_,_,_,_ ), 0 , 41 , 1833 , 100, 56 ), // #387
+ INST(Loopne , X86JecxzLoop , 0 , O(000000,E0,_,_,_,_,_,_ ), 0 , 42 , 1839 , 100, 56 ), // #388
+ INST(Lsl , X86Rm , O(000F00,03,_,_,_,_,_,_ ), 0 , 4 , 0 , 1846 , 101, 10 ), // #389
+ INST(Lss , X86Rm , O(000F00,B2,_,_,_,_,_,_ ), 0 , 4 , 0 , 6289 , 96 , 0 ), // #390
+ INST(Ltr , X86M_NoSize , O(000F00,00,3,_,_,_,_,_ ), 0 , 71 , 0 , 1850 , 97 , 0 ), // #391
+ INST(Lwpins , VexVmi4_Wx , V(XOP_MA,12,0,0,x,_,_,_ ), 0 , 74 , 0 , 1854 , 102, 68 ), // #392
+ INST(Lwpval , VexVmi4_Wx , V(XOP_MA,12,1,0,x,_,_,_ ), 0 , 75 , 0 , 1861 , 102, 68 ), // #393
+ INST(Lzcnt , X86Rm_Raw66H , O(F30F00,BD,_,_,x,_,_,_ ), 0 , 6 , 0 , 1868 , 22 , 70 ), // #394
+ INST(Maskmovdqu , ExtRm_ZDI , O(660F00,57,_,_,_,_,_,_ ), 0 , 3 , 0 , 5807 , 103, 4 ), // #395
+ INST(Maskmovq , ExtRm_ZDI , O(000F00,F7,_,_,_,_,_,_ ), 0 , 4 , 0 , 7778 , 104, 71 ), // #396
+ INST(Maxpd , ExtRm , O(660F00,5F,_,_,_,_,_,_ ), 0 , 3 , 0 , 5841 , 5 , 4 ), // #397
+ INST(Maxps , ExtRm , O(000F00,5F,_,_,_,_,_,_ ), 0 , 4 , 0 , 5848 , 5 , 5 ), // #398
+ INST(Maxsd , ExtRm , O(F20F00,5F,_,_,_,_,_,_ ), 0 , 5 , 0 , 7797 , 6 , 4 ), // #399
+ INST(Maxss , ExtRm , O(F30F00,5F,_,_,_,_,_,_ ), 0 , 6 , 0 , 5862 , 7 , 5 ), // #400
+ INST(Mfence , X86Fence , O(000F00,AE,6,_,_,_,_,_ ), 0 , 73 , 0 , 1874 , 30 , 4 ), // #401
+ INST(Minpd , ExtRm , O(660F00,5D,_,_,_,_,_,_ ), 0 , 3 , 0 , 5891 , 5 , 4 ), // #402
+ INST(Minps , ExtRm , O(000F00,5D,_,_,_,_,_,_ ), 0 , 4 , 0 , 5898 , 5 , 5 ), // #403
+ INST(Minsd , ExtRm , O(F20F00,5D,_,_,_,_,_,_ ), 0 , 5 , 0 , 7861 , 6 , 4 ), // #404
+ INST(Minss , ExtRm , O(F30F00,5D,_,_,_,_,_,_ ), 0 , 6 , 0 , 5912 , 7 , 5 ), // #405
+ INST(Monitor , X86Op , O(000F01,C8,_,_,_,_,_,_ ), 0 , 21 , 0 , 1881 , 105, 72 ), // #406
+ INST(Monitorx , X86Op , O(000F01,FA,_,_,_,_,_,_ ), 0 , 21 , 0 , 1889 , 105, 73 ), // #407
+ INST(Mov , X86Mov , 0 , 0 , 0 , 0 , 138 , 106, 0 ), // #408
+ INST(Movapd , ExtMov , O(660F00,28,_,_,_,_,_,_ ), O(660F00,29,_,_,_,_,_,_ ), 3 , 43 , 5943 , 107, 4 ), // #409
+ INST(Movaps , ExtMov , O(000F00,28,_,_,_,_,_,_ ), O(000F00,29,_,_,_,_,_,_ ), 4 , 44 , 5951 , 107, 5 ), // #410
+ INST(Movbe , ExtMovbe , O(000F38,F0,_,_,x,_,_,_ ), O(000F38,F1,_,_,x,_,_,_ ), 76 , 45 , 626 , 108, 74 ), // #411
+ INST(Movd , ExtMovd , O(000F00,6E,_,_,_,_,_,_ ), O(000F00,7E,_,_,_,_,_,_ ), 4 , 46 , 7771 , 109, 75 ), // #412
+ INST(Movddup , ExtMov , O(F20F00,12,_,_,_,_,_,_ ), 0 , 5 , 0 , 5965 , 6 , 6 ), // #413
+ INST(Movdir64b , X86EnqcmdMovdir64b , O(660F38,F8,_,_,_,_,_,_ ), 0 , 2 , 0 , 1898 , 110, 76 ), // #414
+ INST(Movdiri , X86MovntiMovdiri , O(000F38,F9,_,_,_,_,_,_ ), 0 , 76 , 0 , 1908 , 111, 77 ), // #415
+ INST(Movdq2q , ExtMov , O(F20F00,D6,_,_,_,_,_,_ ), 0 , 5 , 0 , 1916 , 112, 4 ), // #416
+ INST(Movdqa , ExtMov , O(660F00,6F,_,_,_,_,_,_ ), O(660F00,7F,_,_,_,_,_,_ ), 3 , 47 , 5974 , 107, 4 ), // #417
+ INST(Movdqu , ExtMov , O(F30F00,6F,_,_,_,_,_,_ ), O(F30F00,7F,_,_,_,_,_,_ ), 6 , 48 , 5811 , 107, 4 ), // #418
+ INST(Movhlps , ExtMov , O(000F00,12,_,_,_,_,_,_ ), 0 , 4 , 0 , 6049 , 113, 5 ), // #419
+ INST(Movhpd , ExtMov , O(660F00,16,_,_,_,_,_,_ ), O(660F00,17,_,_,_,_,_,_ ), 3 , 49 , 6058 , 114, 4 ), // #420
+ INST(Movhps , ExtMov , O(000F00,16,_,_,_,_,_,_ ), O(000F00,17,_,_,_,_,_,_ ), 4 , 50 , 6066 , 114, 5 ), // #421
+ INST(Movlhps , ExtMov , O(000F00,16,_,_,_,_,_,_ ), 0 , 4 , 0 , 6074 , 113, 5 ), // #422
+ INST(Movlpd , ExtMov , O(660F00,12,_,_,_,_,_,_ ), O(660F00,13,_,_,_,_,_,_ ), 3 , 51 , 6083 , 114, 4 ), // #423
+ INST(Movlps , ExtMov , O(000F00,12,_,_,_,_,_,_ ), O(000F00,13,_,_,_,_,_,_ ), 4 , 52 , 6091 , 114, 5 ), // #424
+ INST(Movmskpd , ExtMov , O(660F00,50,_,_,_,_,_,_ ), 0 , 3 , 0 , 6099 , 115, 4 ), // #425
+ INST(Movmskps , ExtMov , O(000F00,50,_,_,_,_,_,_ ), 0 , 4 , 0 , 6109 , 115, 5 ), // #426
+ INST(Movntdq , ExtMov , 0 , O(660F00,E7,_,_,_,_,_,_ ), 0 , 53 , 6119 , 116, 4 ), // #427
+ INST(Movntdqa , ExtMov , O(660F38,2A,_,_,_,_,_,_ ), 0 , 2 , 0 , 6128 , 92 , 12 ), // #428
+ INST(Movnti , X86MovntiMovdiri , O(000F00,C3,_,_,x,_,_,_ ), 0 , 4 , 0 , 1924 , 111, 4 ), // #429
+ INST(Movntpd , ExtMov , 0 , O(660F00,2B,_,_,_,_,_,_ ), 0 , 54 , 6138 , 116, 4 ), // #430
+ INST(Movntps , ExtMov , 0 , O(000F00,2B,_,_,_,_,_,_ ), 0 , 55 , 6147 , 116, 5 ), // #431
+ INST(Movntq , ExtMov , 0 , O(000F00,E7,_,_,_,_,_,_ ), 0 , 56 , 1931 , 117, 71 ), // #432
+ INST(Movntsd , ExtMov , 0 , O(F20F00,2B,_,_,_,_,_,_ ), 0 , 57 , 1938 , 118, 46 ), // #433
+ INST(Movntss , ExtMov , 0 , O(F30F00,2B,_,_,_,_,_,_ ), 0 , 58 , 1946 , 119, 46 ), // #434
+ INST(Movq , ExtMovq , O(000F00,6E,_,_,x,_,_,_ ), O(000F00,7E,_,_,x,_,_,_ ), 4 , 59 , 7782 , 120, 75 ), // #435
+ INST(Movq2dq , ExtRm , O(F30F00,D6,_,_,_,_,_,_ ), 0 , 6 , 0 , 1954 , 121, 4 ), // #436
+ INST(Movs , X86StrMm , O(000000,A4,_,_,_,_,_,_ ), 0 , 0 , 0 , 425 , 122, 69 ), // #437
+ INST(Movsd , ExtMov , O(F20F00,10,_,_,_,_,_,_ ), O(F20F00,11,_,_,_,_,_,_ ), 5 , 60 , 6162 , 123, 4 ), // #438
+ INST(Movshdup , ExtRm , O(F30F00,16,_,_,_,_,_,_ ), 0 , 6 , 0 , 6169 , 5 , 6 ), // #439
+ INST(Movsldup , ExtRm , O(F30F00,12,_,_,_,_,_,_ ), 0 , 6 , 0 , 6179 , 5 , 6 ), // #440
+ INST(Movss , ExtMov , O(F30F00,10,_,_,_,_,_,_ ), O(F30F00,11,_,_,_,_,_,_ ), 6 , 61 , 6189 , 124, 5 ), // #441
+ INST(Movsx , X86MovsxMovzx , O(000F00,BE,_,_,x,_,_,_ ), 0 , 4 , 0 , 1962 , 125, 0 ), // #442
+ INST(Movsxd , X86Rm , O(000000,63,_,_,1,_,_,_ ), 0 , 20 , 0 , 1968 , 126, 0 ), // #443
+ INST(Movupd , ExtMov , O(660F00,10,_,_,_,_,_,_ ), O(660F00,11,_,_,_,_,_,_ ), 3 , 62 , 6196 , 107, 4 ), // #444
+ INST(Movups , ExtMov , O(000F00,10,_,_,_,_,_,_ ), O(000F00,11,_,_,_,_,_,_ ), 4 , 63 , 6204 , 107, 5 ), // #445
+ INST(Movzx , X86MovsxMovzx , O(000F00,B6,_,_,x,_,_,_ ), 0 , 4 , 0 , 1975 , 125, 0 ), // #446
+ INST(Mpsadbw , ExtRmi , O(660F3A,42,_,_,_,_,_,_ ), 0 , 8 , 0 , 6212 , 8 , 12 ), // #447
+ INST(Mul , X86M_GPB_MulDiv , O(000000,F6,4,_,x,_,_,_ ), 0 , 9 , 0 , 798 , 52 , 1 ), // #448
+ INST(Mulpd , ExtRm , O(660F00,59,_,_,_,_,_,_ ), 0 , 3 , 0 , 6266 , 5 , 4 ), // #449
+ INST(Mulps , ExtRm , O(000F00,59,_,_,_,_,_,_ ), 0 , 4 , 0 , 6273 , 5 , 5 ), // #450
+ INST(Mulsd , ExtRm , O(F20F00,59,_,_,_,_,_,_ ), 0 , 5 , 0 , 6280 , 6 , 4 ), // #451
+ INST(Mulss , ExtRm , O(F30F00,59,_,_,_,_,_,_ ), 0 , 6 , 0 , 6287 , 7 , 5 ), // #452
+ INST(Mulx , VexRvm_ZDX_Wx , V(F20F38,F6,_,0,x,_,_,_ ), 0 , 77 , 0 , 1981 , 127, 78 ), // #453
+ INST(Mwait , X86Op , O(000F01,C9,_,_,_,_,_,_ ), 0 , 21 , 0 , 1986 , 128, 72 ), // #454
+ INST(Mwaitx , X86Op , O(000F01,FB,_,_,_,_,_,_ ), 0 , 21 , 0 , 1992 , 129, 73 ), // #455
+ INST(Neg , X86M_GPB , O(000000,F6,3,_,x,_,_,_ ), 0 , 78 , 0 , 1999 , 130, 1 ), // #456
+ INST(Nop , X86M_Nop , O(000000,90,_,_,_,_,_,_ ), 0 , 0 , 0 , 929 , 131, 0 ), // #457
+ INST(Not , X86M_GPB , O(000000,F6,2,_,x,_,_,_ ), 0 , 1 , 0 , 2003 , 130, 0 ), // #458
+ INST(Or , X86Arith , O(000000,08,1,_,x,_,_,_ ), 0 , 29 , 0 , 1138 , 132, 1 ), // #459
+ INST(Orpd , ExtRm , O(660F00,56,_,_,_,_,_,_ ), 0 , 3 , 0 , 9988 , 11 , 4 ), // #460
+ INST(Orps , ExtRm , O(000F00,56,_,_,_,_,_,_ ), 0 , 4 , 0 , 9995 , 11 , 5 ), // #461
+ INST(Out , X86Out , O(000000,EE,_,_,_,_,_,_ ), O(000000,E6,_,_,_,_,_,_ ), 0 , 64 , 2007 , 133, 0 ), // #462
+ INST(Outs , X86Outs , O(000000,6E,_,_,_,_,_,_ ), 0 , 0 , 0 , 2011 , 134, 0 ), // #463
+ INST(Pabsb , ExtRm_P , O(000F38,1C,_,_,_,_,_,_ ), 0 , 76 , 0 , 6341 , 135, 79 ), // #464
+ INST(Pabsd , ExtRm_P , O(000F38,1E,_,_,_,_,_,_ ), 0 , 76 , 0 , 6348 , 135, 79 ), // #465
+ INST(Pabsw , ExtRm_P , O(000F38,1D,_,_,_,_,_,_ ), 0 , 76 , 0 , 6362 , 135, 79 ), // #466
+ INST(Packssdw , ExtRm_P , O(000F00,6B,_,_,_,_,_,_ ), 0 , 4 , 0 , 6369 , 135, 75 ), // #467
+ INST(Packsswb , ExtRm_P , O(000F00,63,_,_,_,_,_,_ ), 0 , 4 , 0 , 6379 , 135, 75 ), // #468
+ INST(Packusdw , ExtRm , O(660F38,2B,_,_,_,_,_,_ ), 0 , 2 , 0 , 6389 , 5 , 12 ), // #469
+ INST(Packuswb , ExtRm_P , O(000F00,67,_,_,_,_,_,_ ), 0 , 4 , 0 , 6399 , 135, 75 ), // #470
+ INST(Paddb , ExtRm_P , O(000F00,FC,_,_,_,_,_,_ ), 0 , 4 , 0 , 6409 , 135, 75 ), // #471
+ INST(Paddd , ExtRm_P , O(000F00,FE,_,_,_,_,_,_ ), 0 , 4 , 0 , 6416 , 135, 75 ), // #472
+ INST(Paddq , ExtRm_P , O(000F00,D4,_,_,_,_,_,_ ), 0 , 4 , 0 , 6423 , 135, 4 ), // #473
+ INST(Paddsb , ExtRm_P , O(000F00,EC,_,_,_,_,_,_ ), 0 , 4 , 0 , 6430 , 135, 75 ), // #474
+ INST(Paddsw , ExtRm_P , O(000F00,ED,_,_,_,_,_,_ ), 0 , 4 , 0 , 6438 , 135, 75 ), // #475
+ INST(Paddusb , ExtRm_P , O(000F00,DC,_,_,_,_,_,_ ), 0 , 4 , 0 , 6446 , 135, 75 ), // #476
+ INST(Paddusw , ExtRm_P , O(000F00,DD,_,_,_,_,_,_ ), 0 , 4 , 0 , 6455 , 135, 75 ), // #477
+ INST(Paddw , ExtRm_P , O(000F00,FD,_,_,_,_,_,_ ), 0 , 4 , 0 , 6464 , 135, 75 ), // #478
+ INST(Palignr , ExtRmi_P , O(000F3A,0F,_,_,_,_,_,_ ), 0 , 79 , 0 , 6471 , 136, 6 ), // #479
+ INST(Pand , ExtRm_P , O(000F00,DB,_,_,_,_,_,_ ), 0 , 4 , 0 , 6480 , 137, 75 ), // #480
+ INST(Pandn , ExtRm_P , O(000F00,DF,_,_,_,_,_,_ ), 0 , 4 , 0 , 6493 , 138, 75 ), // #481
+ INST(Pause , X86Op , O(F30000,90,_,_,_,_,_,_ ), 0 , 80 , 0 , 2016 , 30 , 0 ), // #482
+ INST(Pavgb , ExtRm_P , O(000F00,E0,_,_,_,_,_,_ ), 0 , 4 , 0 , 6523 , 135, 80 ), // #483
+ INST(Pavgusb , Ext3dNow , O(000F0F,BF,_,_,_,_,_,_ ), 0 , 81 , 0 , 2022 , 139, 48 ), // #484
+ INST(Pavgw , ExtRm_P , O(000F00,E3,_,_,_,_,_,_ ), 0 , 4 , 0 , 6530 , 135, 80 ), // #485
+ INST(Pblendvb , ExtRm_XMM0 , O(660F38,10,_,_,_,_,_,_ ), 0 , 2 , 0 , 6546 , 15 , 12 ), // #486
+ INST(Pblendw , ExtRmi , O(660F3A,0E,_,_,_,_,_,_ ), 0 , 8 , 0 , 6556 , 8 , 12 ), // #487
+ INST(Pclmulqdq , ExtRmi , O(660F3A,44,_,_,_,_,_,_ ), 0 , 8 , 0 , 6649 , 8 , 81 ), // #488
+ INST(Pcmpeqb , ExtRm_P , O(000F00,74,_,_,_,_,_,_ ), 0 , 4 , 0 , 6681 , 138, 75 ), // #489
+ INST(Pcmpeqd , ExtRm_P , O(000F00,76,_,_,_,_,_,_ ), 0 , 4 , 0 , 6690 , 138, 75 ), // #490
+ INST(Pcmpeqq , ExtRm , O(660F38,29,_,_,_,_,_,_ ), 0 , 2 , 0 , 6699 , 140, 12 ), // #491
+ INST(Pcmpeqw , ExtRm_P , O(000F00,75,_,_,_,_,_,_ ), 0 , 4 , 0 , 6708 , 138, 75 ), // #492
+ INST(Pcmpestri , ExtRmi , O(660F3A,61,_,_,_,_,_,_ ), 0 , 8 , 0 , 6717 , 141, 82 ), // #493
+ INST(Pcmpestrm , ExtRmi , O(660F3A,60,_,_,_,_,_,_ ), 0 , 8 , 0 , 6728 , 142, 82 ), // #494
+ INST(Pcmpgtb , ExtRm_P , O(000F00,64,_,_,_,_,_,_ ), 0 , 4 , 0 , 6739 , 138, 75 ), // #495
+ INST(Pcmpgtd , ExtRm_P , O(000F00,66,_,_,_,_,_,_ ), 0 , 4 , 0 , 6748 , 138, 75 ), // #496
+ INST(Pcmpgtq , ExtRm , O(660F38,37,_,_,_,_,_,_ ), 0 , 2 , 0 , 6757 , 140, 42 ), // #497
+ INST(Pcmpgtw , ExtRm_P , O(000F00,65,_,_,_,_,_,_ ), 0 , 4 , 0 , 6766 , 138, 75 ), // #498
+ INST(Pcmpistri , ExtRmi , O(660F3A,63,_,_,_,_,_,_ ), 0 , 8 , 0 , 6775 , 143, 82 ), // #499
+ INST(Pcmpistrm , ExtRmi , O(660F3A,62,_,_,_,_,_,_ ), 0 , 8 , 0 , 6786 , 144, 82 ), // #500
+ INST(Pcommit , X86Op_O , O(660F00,AE,7,_,_,_,_,_ ), 0 , 23 , 0 , 2030 , 30 , 83 ), // #501
+ INST(Pdep , VexRvm_Wx , V(F20F38,F5,_,0,x,_,_,_ ), 0 , 77 , 0 , 2038 , 10 , 78 ), // #502
+ INST(Pext , VexRvm_Wx , V(F30F38,F5,_,0,x,_,_,_ ), 0 , 82 , 0 , 2043 , 10 , 78 ), // #503
+ INST(Pextrb , ExtExtract , O(000F3A,14,_,_,_,_,_,_ ), 0 , 79 , 0 , 7273 , 145, 12 ), // #504
+ INST(Pextrd , ExtExtract , O(000F3A,16,_,_,_,_,_,_ ), 0 , 79 , 0 , 7281 , 56 , 12 ), // #505
+ INST(Pextrq , ExtExtract , O(000F3A,16,_,_,1,_,_,_ ), 0 , 83 , 0 , 7289 , 146, 12 ), // #506
+ INST(Pextrw , ExtPextrw , O(000F00,C5,_,_,_,_,_,_ ), O(000F3A,15,_,_,_,_,_,_ ), 4 , 65 , 7297 , 147, 84 ), // #507
+ INST(Pf2id , Ext3dNow , O(000F0F,1D,_,_,_,_,_,_ ), 0 , 81 , 0 , 2048 , 139, 48 ), // #508
+ INST(Pf2iw , Ext3dNow , O(000F0F,1C,_,_,_,_,_,_ ), 0 , 81 , 0 , 2054 , 139, 85 ), // #509
+ INST(Pfacc , Ext3dNow , O(000F0F,AE,_,_,_,_,_,_ ), 0 , 81 , 0 , 2060 , 139, 48 ), // #510
+ INST(Pfadd , Ext3dNow , O(000F0F,9E,_,_,_,_,_,_ ), 0 , 81 , 0 , 2066 , 139, 48 ), // #511
+ INST(Pfcmpeq , Ext3dNow , O(000F0F,B0,_,_,_,_,_,_ ), 0 , 81 , 0 , 2072 , 139, 48 ), // #512
+ INST(Pfcmpge , Ext3dNow , O(000F0F,90,_,_,_,_,_,_ ), 0 , 81 , 0 , 2080 , 139, 48 ), // #513
+ INST(Pfcmpgt , Ext3dNow , O(000F0F,A0,_,_,_,_,_,_ ), 0 , 81 , 0 , 2088 , 139, 48 ), // #514
+ INST(Pfmax , Ext3dNow , O(000F0F,A4,_,_,_,_,_,_ ), 0 , 81 , 0 , 2096 , 139, 48 ), // #515
+ INST(Pfmin , Ext3dNow , O(000F0F,94,_,_,_,_,_,_ ), 0 , 81 , 0 , 2102 , 139, 48 ), // #516
+ INST(Pfmul , Ext3dNow , O(000F0F,B4,_,_,_,_,_,_ ), 0 , 81 , 0 , 2108 , 139, 48 ), // #517
+ INST(Pfnacc , Ext3dNow , O(000F0F,8A,_,_,_,_,_,_ ), 0 , 81 , 0 , 2114 , 139, 85 ), // #518
+ INST(Pfpnacc , Ext3dNow , O(000F0F,8E,_,_,_,_,_,_ ), 0 , 81 , 0 , 2121 , 139, 85 ), // #519
+ INST(Pfrcp , Ext3dNow , O(000F0F,96,_,_,_,_,_,_ ), 0 , 81 , 0 , 2129 , 139, 48 ), // #520
+ INST(Pfrcpit1 , Ext3dNow , O(000F0F,A6,_,_,_,_,_,_ ), 0 , 81 , 0 , 2135 , 139, 48 ), // #521
+ INST(Pfrcpit2 , Ext3dNow , O(000F0F,B6,_,_,_,_,_,_ ), 0 , 81 , 0 , 2144 , 139, 48 ), // #522
+ INST(Pfrcpv , Ext3dNow , O(000F0F,86,_,_,_,_,_,_ ), 0 , 81 , 0 , 2153 , 139, 86 ), // #523
+ INST(Pfrsqit1 , Ext3dNow , O(000F0F,A7,_,_,_,_,_,_ ), 0 , 81 , 0 , 2160 , 139, 48 ), // #524
+ INST(Pfrsqrt , Ext3dNow , O(000F0F,97,_,_,_,_,_,_ ), 0 , 81 , 0 , 2169 , 139, 48 ), // #525
+ INST(Pfrsqrtv , Ext3dNow , O(000F0F,87,_,_,_,_,_,_ ), 0 , 81 , 0 , 2177 , 139, 86 ), // #526
+ INST(Pfsub , Ext3dNow , O(000F0F,9A,_,_,_,_,_,_ ), 0 , 81 , 0 , 2186 , 139, 48 ), // #527
+ INST(Pfsubr , Ext3dNow , O(000F0F,AA,_,_,_,_,_,_ ), 0 , 81 , 0 , 2192 , 139, 48 ), // #528
+ INST(Phaddd , ExtRm_P , O(000F38,02,_,_,_,_,_,_ ), 0 , 76 , 0 , 7376 , 135, 79 ), // #529
+ INST(Phaddsw , ExtRm_P , O(000F38,03,_,_,_,_,_,_ ), 0 , 76 , 0 , 7393 , 135, 79 ), // #530
+ INST(Phaddw , ExtRm_P , O(000F38,01,_,_,_,_,_,_ ), 0 , 76 , 0 , 7462 , 135, 79 ), // #531
+ INST(Phminposuw , ExtRm , O(660F38,41,_,_,_,_,_,_ ), 0 , 2 , 0 , 7488 , 5 , 12 ), // #532
+ INST(Phsubd , ExtRm_P , O(000F38,06,_,_,_,_,_,_ ), 0 , 76 , 0 , 7509 , 135, 79 ), // #533
+ INST(Phsubsw , ExtRm_P , O(000F38,07,_,_,_,_,_,_ ), 0 , 76 , 0 , 7526 , 135, 79 ), // #534
+ INST(Phsubw , ExtRm_P , O(000F38,05,_,_,_,_,_,_ ), 0 , 76 , 0 , 7535 , 135, 79 ), // #535
+ INST(Pi2fd , Ext3dNow , O(000F0F,0D,_,_,_,_,_,_ ), 0 , 81 , 0 , 2199 , 139, 48 ), // #536
+ INST(Pi2fw , Ext3dNow , O(000F0F,0C,_,_,_,_,_,_ ), 0 , 81 , 0 , 2205 , 139, 85 ), // #537
+ INST(Pinsrb , ExtRmi , O(660F3A,20,_,_,_,_,_,_ ), 0 , 8 , 0 , 7552 , 148, 12 ), // #538
+ INST(Pinsrd , ExtRmi , O(660F3A,22,_,_,_,_,_,_ ), 0 , 8 , 0 , 7560 , 149, 12 ), // #539
+ INST(Pinsrq , ExtRmi , O(660F3A,22,_,_,1,_,_,_ ), 0 , 84 , 0 , 7568 , 150, 12 ), // #540
+ INST(Pinsrw , ExtRmi_P , O(000F00,C4,_,_,_,_,_,_ ), 0 , 4 , 0 , 7576 , 151, 80 ), // #541
+ INST(Pmaddubsw , ExtRm_P , O(000F38,04,_,_,_,_,_,_ ), 0 , 76 , 0 , 7746 , 135, 79 ), // #542
+ INST(Pmaddwd , ExtRm_P , O(000F00,F5,_,_,_,_,_,_ ), 0 , 4 , 0 , 7757 , 135, 75 ), // #543
+ INST(Pmaxsb , ExtRm , O(660F38,3C,_,_,_,_,_,_ ), 0 , 2 , 0 , 7788 , 11 , 12 ), // #544
+ INST(Pmaxsd , ExtRm , O(660F38,3D,_,_,_,_,_,_ ), 0 , 2 , 0 , 7796 , 11 , 12 ), // #545
+ INST(Pmaxsw , ExtRm_P , O(000F00,EE,_,_,_,_,_,_ ), 0 , 4 , 0 , 7812 , 137, 80 ), // #546
+ INST(Pmaxub , ExtRm_P , O(000F00,DE,_,_,_,_,_,_ ), 0 , 4 , 0 , 7820 , 137, 80 ), // #547
+ INST(Pmaxud , ExtRm , O(660F38,3F,_,_,_,_,_,_ ), 0 , 2 , 0 , 7828 , 11 , 12 ), // #548
+ INST(Pmaxuw , ExtRm , O(660F38,3E,_,_,_,_,_,_ ), 0 , 2 , 0 , 7844 , 11 , 12 ), // #549
+ INST(Pminsb , ExtRm , O(660F38,38,_,_,_,_,_,_ ), 0 , 2 , 0 , 7852 , 11 , 12 ), // #550
+ INST(Pminsd , ExtRm , O(660F38,39,_,_,_,_,_,_ ), 0 , 2 , 0 , 7860 , 11 , 12 ), // #551
+ INST(Pminsw , ExtRm_P , O(000F00,EA,_,_,_,_,_,_ ), 0 , 4 , 0 , 7876 , 137, 80 ), // #552
+ INST(Pminub , ExtRm_P , O(000F00,DA,_,_,_,_,_,_ ), 0 , 4 , 0 , 7884 , 137, 80 ), // #553
+ INST(Pminud , ExtRm , O(660F38,3B,_,_,_,_,_,_ ), 0 , 2 , 0 , 7892 , 11 , 12 ), // #554
+ INST(Pminuw , ExtRm , O(660F38,3A,_,_,_,_,_,_ ), 0 , 2 , 0 , 7908 , 11 , 12 ), // #555
+ INST(Pmovmskb , ExtRm_P , O(000F00,D7,_,_,_,_,_,_ ), 0 , 4 , 0 , 7986 , 152, 80 ), // #556
+ INST(Pmovsxbd , ExtRm , O(660F38,21,_,_,_,_,_,_ ), 0 , 2 , 0 , 8083 , 7 , 12 ), // #557
+ INST(Pmovsxbq , ExtRm , O(660F38,22,_,_,_,_,_,_ ), 0 , 2 , 0 , 8093 , 153, 12 ), // #558
+ INST(Pmovsxbw , ExtRm , O(660F38,20,_,_,_,_,_,_ ), 0 , 2 , 0 , 8103 , 6 , 12 ), // #559
+ INST(Pmovsxdq , ExtRm , O(660F38,25,_,_,_,_,_,_ ), 0 , 2 , 0 , 8113 , 6 , 12 ), // #560
+ INST(Pmovsxwd , ExtRm , O(660F38,23,_,_,_,_,_,_ ), 0 , 2 , 0 , 8123 , 6 , 12 ), // #561
+ INST(Pmovsxwq , ExtRm , O(660F38,24,_,_,_,_,_,_ ), 0 , 2 , 0 , 8133 , 7 , 12 ), // #562
+ INST(Pmovzxbd , ExtRm , O(660F38,31,_,_,_,_,_,_ ), 0 , 2 , 0 , 8220 , 7 , 12 ), // #563
+ INST(Pmovzxbq , ExtRm , O(660F38,32,_,_,_,_,_,_ ), 0 , 2 , 0 , 8230 , 153, 12 ), // #564
+ INST(Pmovzxbw , ExtRm , O(660F38,30,_,_,_,_,_,_ ), 0 , 2 , 0 , 8240 , 6 , 12 ), // #565
+ INST(Pmovzxdq , ExtRm , O(660F38,35,_,_,_,_,_,_ ), 0 , 2 , 0 , 8250 , 6 , 12 ), // #566
+ INST(Pmovzxwd , ExtRm , O(660F38,33,_,_,_,_,_,_ ), 0 , 2 , 0 , 8260 , 6 , 12 ), // #567
+ INST(Pmovzxwq , ExtRm , O(660F38,34,_,_,_,_,_,_ ), 0 , 2 , 0 , 8270 , 7 , 12 ), // #568
+ INST(Pmuldq , ExtRm , O(660F38,28,_,_,_,_,_,_ ), 0 , 2 , 0 , 8280 , 5 , 12 ), // #569
+ INST(Pmulhrsw , ExtRm_P , O(000F38,0B,_,_,_,_,_,_ ), 0 , 76 , 0 , 8288 , 135, 79 ), // #570
+ INST(Pmulhrw , Ext3dNow , O(000F0F,B7,_,_,_,_,_,_ ), 0 , 81 , 0 , 2211 , 139, 48 ), // #571
+ INST(Pmulhuw , ExtRm_P , O(000F00,E4,_,_,_,_,_,_ ), 0 , 4 , 0 , 8298 , 135, 80 ), // #572
+ INST(Pmulhw , ExtRm_P , O(000F00,E5,_,_,_,_,_,_ ), 0 , 4 , 0 , 8307 , 135, 75 ), // #573
+ INST(Pmulld , ExtRm , O(660F38,40,_,_,_,_,_,_ ), 0 , 2 , 0 , 8315 , 5 , 12 ), // #574
+ INST(Pmullw , ExtRm_P , O(000F00,D5,_,_,_,_,_,_ ), 0 , 4 , 0 , 8331 , 135, 75 ), // #575
+ INST(Pmuludq , ExtRm_P , O(000F00,F4,_,_,_,_,_,_ ), 0 , 4 , 0 , 8354 , 135, 4 ), // #576
+ INST(Pop , X86Pop , O(000000,8F,0,_,_,_,_,_ ), O(000000,58,_,_,_,_,_,_ ), 0 , 66 , 2219 , 154, 0 ), // #577
+ INST(Popa , X86Op , O(660000,61,_,_,_,_,_,_ ), 0 , 19 , 0 , 2223 , 75 , 0 ), // #578
+ INST(Popad , X86Op , O(000000,61,_,_,_,_,_,_ ), 0 , 0 , 0 , 2228 , 75 , 0 ), // #579
+ INST(Popcnt , X86Rm_Raw66H , O(F30F00,B8,_,_,x,_,_,_ ), 0 , 6 , 0 , 2234 , 22 , 87 ), // #580
+ INST(Popf , X86Op , O(660000,9D,_,_,_,_,_,_ ), 0 , 19 , 0 , 2241 , 30 , 88 ), // #581
+ INST(Popfd , X86Op , O(000000,9D,_,_,_,_,_,_ ), 0 , 0 , 0 , 2246 , 75 , 88 ), // #582
+ INST(Popfq , X86Op , O(000000,9D,_,_,_,_,_,_ ), 0 , 0 , 0 , 2252 , 155, 88 ), // #583
+ INST(Por , ExtRm_P , O(000F00,EB,_,_,_,_,_,_ ), 0 , 4 , 0 , 8399 , 137, 75 ), // #584
+ INST(Prefetch , X86M_Only , O(000F00,0D,0,_,_,_,_,_ ), 0 , 4 , 0 , 2258 , 31 , 48 ), // #585
+ INST(Prefetchnta , X86M_Only , O(000F00,18,0,_,_,_,_,_ ), 0 , 4 , 0 , 2267 , 31 , 71 ), // #586
+ INST(Prefetcht0 , X86M_Only , O(000F00,18,1,_,_,_,_,_ ), 0 , 27 , 0 , 2279 , 31 , 71 ), // #587
+ INST(Prefetcht1 , X86M_Only , O(000F00,18,2,_,_,_,_,_ ), 0 , 69 , 0 , 2290 , 31 , 71 ), // #588
+ INST(Prefetcht2 , X86M_Only , O(000F00,18,3,_,_,_,_,_ ), 0 , 71 , 0 , 2301 , 31 , 71 ), // #589
+ INST(Prefetchw , X86M_Only , O(000F00,0D,1,_,_,_,_,_ ), 0 , 27 , 0 , 2312 , 31 , 89 ), // #590
+ INST(Prefetchwt1 , X86M_Only , O(000F00,0D,2,_,_,_,_,_ ), 0 , 69 , 0 , 2322 , 31 , 90 ), // #591
+ INST(Psadbw , ExtRm_P , O(000F00,F6,_,_,_,_,_,_ ), 0 , 4 , 0 , 3980 , 135, 80 ), // #592
+ INST(Pshufb , ExtRm_P , O(000F38,00,_,_,_,_,_,_ ), 0 , 76 , 0 , 8725 , 135, 79 ), // #593
+ INST(Pshufd , ExtRmi , O(660F00,70,_,_,_,_,_,_ ), 0 , 3 , 0 , 8746 , 8 , 4 ), // #594
+ INST(Pshufhw , ExtRmi , O(F30F00,70,_,_,_,_,_,_ ), 0 , 6 , 0 , 8754 , 8 , 4 ), // #595
+ INST(Pshuflw , ExtRmi , O(F20F00,70,_,_,_,_,_,_ ), 0 , 5 , 0 , 8763 , 8 , 4 ), // #596
+ INST(Pshufw , ExtRmi_P , O(000F00,70,_,_,_,_,_,_ ), 0 , 4 , 0 , 2334 , 156, 71 ), // #597
+ INST(Psignb , ExtRm_P , O(000F38,08,_,_,_,_,_,_ ), 0 , 76 , 0 , 8772 , 135, 79 ), // #598
+ INST(Psignd , ExtRm_P , O(000F38,0A,_,_,_,_,_,_ ), 0 , 76 , 0 , 8780 , 135, 79 ), // #599
+ INST(Psignw , ExtRm_P , O(000F38,09,_,_,_,_,_,_ ), 0 , 76 , 0 , 8788 , 135, 79 ), // #600
+ INST(Pslld , ExtRmRi_P , O(000F00,F2,_,_,_,_,_,_ ), O(000F00,72,6,_,_,_,_,_ ), 4 , 67 , 8796 , 157, 75 ), // #601
+ INST(Pslldq , ExtRmRi , 0 , O(660F00,73,7,_,_,_,_,_ ), 0 , 68 , 8803 , 158, 4 ), // #602
+ INST(Psllq , ExtRmRi_P , O(000F00,F3,_,_,_,_,_,_ ), O(000F00,73,6,_,_,_,_,_ ), 4 , 69 , 8811 , 157, 75 ), // #603
+ INST(Psllw , ExtRmRi_P , O(000F00,F1,_,_,_,_,_,_ ), O(000F00,71,6,_,_,_,_,_ ), 4 , 70 , 8842 , 157, 75 ), // #604
+ INST(Psrad , ExtRmRi_P , O(000F00,E2,_,_,_,_,_,_ ), O(000F00,72,4,_,_,_,_,_ ), 4 , 71 , 8849 , 157, 75 ), // #605
+ INST(Psraw , ExtRmRi_P , O(000F00,E1,_,_,_,_,_,_ ), O(000F00,71,4,_,_,_,_,_ ), 4 , 72 , 8887 , 157, 75 ), // #606
+ INST(Psrld , ExtRmRi_P , O(000F00,D2,_,_,_,_,_,_ ), O(000F00,72,2,_,_,_,_,_ ), 4 , 73 , 8894 , 157, 75 ), // #607
+ INST(Psrldq , ExtRmRi , 0 , O(660F00,73,3,_,_,_,_,_ ), 0 , 74 , 8901 , 158, 4 ), // #608
+ INST(Psrlq , ExtRmRi_P , O(000F00,D3,_,_,_,_,_,_ ), O(000F00,73,2,_,_,_,_,_ ), 4 , 75 , 8909 , 157, 75 ), // #609
+ INST(Psrlw , ExtRmRi_P , O(000F00,D1,_,_,_,_,_,_ ), O(000F00,71,2,_,_,_,_,_ ), 4 , 76 , 8940 , 157, 75 ), // #610
+ INST(Psubb , ExtRm_P , O(000F00,F8,_,_,_,_,_,_ ), 0 , 4 , 0 , 8947 , 138, 75 ), // #611
+ INST(Psubd , ExtRm_P , O(000F00,FA,_,_,_,_,_,_ ), 0 , 4 , 0 , 8954 , 138, 75 ), // #612
+ INST(Psubq , ExtRm_P , O(000F00,FB,_,_,_,_,_,_ ), 0 , 4 , 0 , 8961 , 138, 4 ), // #613
+ INST(Psubsb , ExtRm_P , O(000F00,E8,_,_,_,_,_,_ ), 0 , 4 , 0 , 8968 , 138, 75 ), // #614
+ INST(Psubsw , ExtRm_P , O(000F00,E9,_,_,_,_,_,_ ), 0 , 4 , 0 , 8976 , 138, 75 ), // #615
+ INST(Psubusb , ExtRm_P , O(000F00,D8,_,_,_,_,_,_ ), 0 , 4 , 0 , 8984 , 138, 75 ), // #616
+ INST(Psubusw , ExtRm_P , O(000F00,D9,_,_,_,_,_,_ ), 0 , 4 , 0 , 8993 , 138, 75 ), // #617
+ INST(Psubw , ExtRm_P , O(000F00,F9,_,_,_,_,_,_ ), 0 , 4 , 0 , 9002 , 138, 75 ), // #618
+ INST(Pswapd , Ext3dNow , O(000F0F,BB,_,_,_,_,_,_ ), 0 , 81 , 0 , 2341 , 139, 85 ), // #619
+ INST(Ptest , ExtRm , O(660F38,17,_,_,_,_,_,_ ), 0 , 2 , 0 , 9031 , 5 , 91 ), // #620
+ INST(Punpckhbw , ExtRm_P , O(000F00,68,_,_,_,_,_,_ ), 0 , 4 , 0 , 9114 , 135, 75 ), // #621
+ INST(Punpckhdq , ExtRm_P , O(000F00,6A,_,_,_,_,_,_ ), 0 , 4 , 0 , 9125 , 135, 75 ), // #622
+ INST(Punpckhqdq , ExtRm , O(660F00,6D,_,_,_,_,_,_ ), 0 , 3 , 0 , 9136 , 5 , 4 ), // #623
+ INST(Punpckhwd , ExtRm_P , O(000F00,69,_,_,_,_,_,_ ), 0 , 4 , 0 , 9148 , 135, 75 ), // #624
+ INST(Punpcklbw , ExtRm_P , O(000F00,60,_,_,_,_,_,_ ), 0 , 4 , 0 , 9159 , 135, 75 ), // #625
+ INST(Punpckldq , ExtRm_P , O(000F00,62,_,_,_,_,_,_ ), 0 , 4 , 0 , 9170 , 135, 75 ), // #626
+ INST(Punpcklqdq , ExtRm , O(660F00,6C,_,_,_,_,_,_ ), 0 , 3 , 0 , 9181 , 5 , 4 ), // #627
+ INST(Punpcklwd , ExtRm_P , O(000F00,61,_,_,_,_,_,_ ), 0 , 4 , 0 , 9193 , 135, 75 ), // #628
+ INST(Push , X86Push , O(000000,FF,6,_,_,_,_,_ ), O(000000,50,_,_,_,_,_,_ ), 30 , 77 , 2348 , 159, 0 ), // #629
+ INST(Pusha , X86Op , O(660000,60,_,_,_,_,_,_ ), 0 , 19 , 0 , 2353 , 75 , 0 ), // #630
+ INST(Pushad , X86Op , O(000000,60,_,_,_,_,_,_ ), 0 , 0 , 0 , 2359 , 75 , 0 ), // #631
+ INST(Pushf , X86Op , O(660000,9C,_,_,_,_,_,_ ), 0 , 19 , 0 , 2366 , 30 , 92 ), // #632
+ INST(Pushfd , X86Op , O(000000,9C,_,_,_,_,_,_ ), 0 , 0 , 0 , 2372 , 75 , 92 ), // #633
+ INST(Pushfq , X86Op , O(000000,9C,_,_,_,_,_,_ ), 0 , 0 , 0 , 2379 , 155, 92 ), // #634
+ INST(Pxor , ExtRm_P , O(000F00,EF,_,_,_,_,_,_ ), 0 , 4 , 0 , 9204 , 138, 75 ), // #635
+ INST(Rcl , X86Rot , O(000000,D0,2,_,x,_,_,_ ), 0 , 1 , 0 , 2386 , 160, 93 ), // #636
+ INST(Rcpps , ExtRm , O(000F00,53,_,_,_,_,_,_ ), 0 , 4 , 0 , 9332 , 5 , 5 ), // #637
+ INST(Rcpss , ExtRm , O(F30F00,53,_,_,_,_,_,_ ), 0 , 6 , 0 , 9339 , 7 , 5 ), // #638
+ INST(Rcr , X86Rot , O(000000,D0,3,_,x,_,_,_ ), 0 , 78 , 0 , 2390 , 160, 93 ), // #639
+ INST(Rdfsbase , X86M , O(F30F00,AE,0,_,x,_,_,_ ), 0 , 6 , 0 , 2394 , 161, 94 ), // #640
+ INST(Rdgsbase , X86M , O(F30F00,AE,1,_,x,_,_,_ ), 0 , 85 , 0 , 2403 , 161, 94 ), // #641
+ INST(Rdmsr , X86Op , O(000F00,32,_,_,_,_,_,_ ), 0 , 4 , 0 , 2412 , 162, 95 ), // #642
+ INST(Rdpid , X86R_Native , O(F30F00,C7,7,_,_,_,_,_ ), 0 , 86 , 0 , 2418 , 163, 96 ), // #643
+ INST(Rdpmc , X86Op , O(000F00,33,_,_,_,_,_,_ ), 0 , 4 , 0 , 2424 , 162, 0 ), // #644
+ INST(Rdrand , X86M , O(000F00,C7,6,_,x,_,_,_ ), 0 , 73 , 0 , 2430 , 23 , 97 ), // #645
+ INST(Rdseed , X86M , O(000F00,C7,7,_,x,_,_,_ ), 0 , 22 , 0 , 2437 , 23 , 98 ), // #646
+ INST(Rdtsc , X86Op , O(000F00,31,_,_,_,_,_,_ ), 0 , 4 , 0 , 2444 , 28 , 99 ), // #647
+ INST(Rdtscp , X86Op , O(000F01,F9,_,_,_,_,_,_ ), 0 , 21 , 0 , 2450 , 162, 100), // #648
+ INST(Ret , X86Ret , O(000000,C2,_,_,_,_,_,_ ), 0 , 0 , 0 , 2883 , 164, 0 ), // #649
+ INST(Rol , X86Rot , O(000000,D0,0,_,x,_,_,_ ), 0 , 0 , 0 , 2457 , 160, 101), // #650
+ INST(Ror , X86Rot , O(000000,D0,1,_,x,_,_,_ ), 0 , 29 , 0 , 2461 , 160, 101), // #651
+ INST(Rorx , VexRmi_Wx , V(F20F3A,F0,_,0,x,_,_,_ ), 0 , 87 , 0 , 2465 , 165, 78 ), // #652
+ INST(Roundpd , ExtRmi , O(660F3A,09,_,_,_,_,_,_ ), 0 , 8 , 0 , 9434 , 8 , 12 ), // #653
+ INST(Roundps , ExtRmi , O(660F3A,08,_,_,_,_,_,_ ), 0 , 8 , 0 , 9443 , 8 , 12 ), // #654
+ INST(Roundsd , ExtRmi , O(660F3A,0B,_,_,_,_,_,_ ), 0 , 8 , 0 , 9452 , 35 , 12 ), // #655
+ INST(Roundss , ExtRmi , O(660F3A,0A,_,_,_,_,_,_ ), 0 , 8 , 0 , 9461 , 36 , 12 ), // #656
+ INST(Rsm , X86Op , O(000F00,AA,_,_,_,_,_,_ ), 0 , 4 , 0 , 2470 , 75 , 1 ), // #657
+ INST(Rsqrtps , ExtRm , O(000F00,52,_,_,_,_,_,_ ), 0 , 4 , 0 , 9558 , 5 , 5 ), // #658
+ INST(Rsqrtss , ExtRm , O(F30F00,52,_,_,_,_,_,_ ), 0 , 6 , 0 , 9567 , 7 , 5 ), // #659
+ INST(Sahf , X86Op , O(000000,9E,_,_,_,_,_,_ ), 0 , 0 , 0 , 2474 , 90 , 102), // #660
+ INST(Sal , X86Rot , O(000000,D0,4,_,x,_,_,_ ), 0 , 9 , 0 , 2479 , 160, 1 ), // #661
+ INST(Sar , X86Rot , O(000000,D0,7,_,x,_,_,_ ), 0 , 25 , 0 , 2483 , 160, 1 ), // #662
+ INST(Sarx , VexRmv_Wx , V(F30F38,F7,_,0,x,_,_,_ ), 0 , 82 , 0 , 2487 , 13 , 78 ), // #663
+ INST(Sbb , X86Arith , O(000000,18,3,_,x,_,_,_ ), 0 , 78 , 0 , 2492 , 166, 2 ), // #664
+ INST(Scas , X86StrRm , O(000000,AE,_,_,_,_,_,_ ), 0 , 0 , 0 , 2496 , 167, 35 ), // #665
+ INST(Seta , X86Set , O(000F00,97,_,_,_,_,_,_ ), 0 , 4 , 0 , 2501 , 168, 54 ), // #666
+ INST(Setae , X86Set , O(000F00,93,_,_,_,_,_,_ ), 0 , 4 , 0 , 2506 , 168, 55 ), // #667
+ INST(Setb , X86Set , O(000F00,92,_,_,_,_,_,_ ), 0 , 4 , 0 , 2512 , 168, 55 ), // #668
+ INST(Setbe , X86Set , O(000F00,96,_,_,_,_,_,_ ), 0 , 4 , 0 , 2517 , 168, 54 ), // #669
+ INST(Setc , X86Set , O(000F00,92,_,_,_,_,_,_ ), 0 , 4 , 0 , 2523 , 168, 55 ), // #670
+ INST(Sete , X86Set , O(000F00,94,_,_,_,_,_,_ ), 0 , 4 , 0 , 2528 , 168, 56 ), // #671
+ INST(Setg , X86Set , O(000F00,9F,_,_,_,_,_,_ ), 0 , 4 , 0 , 2533 , 168, 57 ), // #672
+ INST(Setge , X86Set , O(000F00,9D,_,_,_,_,_,_ ), 0 , 4 , 0 , 2538 , 168, 58 ), // #673
+ INST(Setl , X86Set , O(000F00,9C,_,_,_,_,_,_ ), 0 , 4 , 0 , 2544 , 168, 58 ), // #674
+ INST(Setle , X86Set , O(000F00,9E,_,_,_,_,_,_ ), 0 , 4 , 0 , 2549 , 168, 57 ), // #675
+ INST(Setna , X86Set , O(000F00,96,_,_,_,_,_,_ ), 0 , 4 , 0 , 2555 , 168, 54 ), // #676
+ INST(Setnae , X86Set , O(000F00,92,_,_,_,_,_,_ ), 0 , 4 , 0 , 2561 , 168, 55 ), // #677
+ INST(Setnb , X86Set , O(000F00,93,_,_,_,_,_,_ ), 0 , 4 , 0 , 2568 , 168, 55 ), // #678
+ INST(Setnbe , X86Set , O(000F00,97,_,_,_,_,_,_ ), 0 , 4 , 0 , 2574 , 168, 54 ), // #679
+ INST(Setnc , X86Set , O(000F00,93,_,_,_,_,_,_ ), 0 , 4 , 0 , 2581 , 168, 55 ), // #680
+ INST(Setne , X86Set , O(000F00,95,_,_,_,_,_,_ ), 0 , 4 , 0 , 2587 , 168, 56 ), // #681
+ INST(Setng , X86Set , O(000F00,9E,_,_,_,_,_,_ ), 0 , 4 , 0 , 2593 , 168, 57 ), // #682
+ INST(Setnge , X86Set , O(000F00,9C,_,_,_,_,_,_ ), 0 , 4 , 0 , 2599 , 168, 58 ), // #683
+ INST(Setnl , X86Set , O(000F00,9D,_,_,_,_,_,_ ), 0 , 4 , 0 , 2606 , 168, 58 ), // #684
+ INST(Setnle , X86Set , O(000F00,9F,_,_,_,_,_,_ ), 0 , 4 , 0 , 2612 , 168, 57 ), // #685
+ INST(Setno , X86Set , O(000F00,91,_,_,_,_,_,_ ), 0 , 4 , 0 , 2619 , 168, 52 ), // #686
+ INST(Setnp , X86Set , O(000F00,9B,_,_,_,_,_,_ ), 0 , 4 , 0 , 2625 , 168, 59 ), // #687
+ INST(Setns , X86Set , O(000F00,99,_,_,_,_,_,_ ), 0 , 4 , 0 , 2631 , 168, 60 ), // #688
+ INST(Setnz , X86Set , O(000F00,95,_,_,_,_,_,_ ), 0 , 4 , 0 , 2637 , 168, 56 ), // #689
+ INST(Seto , X86Set , O(000F00,90,_,_,_,_,_,_ ), 0 , 4 , 0 , 2643 , 168, 52 ), // #690
+ INST(Setp , X86Set , O(000F00,9A,_,_,_,_,_,_ ), 0 , 4 , 0 , 2648 , 168, 59 ), // #691
+ INST(Setpe , X86Set , O(000F00,9A,_,_,_,_,_,_ ), 0 , 4 , 0 , 2653 , 168, 59 ), // #692
+ INST(Setpo , X86Set , O(000F00,9B,_,_,_,_,_,_ ), 0 , 4 , 0 , 2659 , 168, 59 ), // #693
+ INST(Sets , X86Set , O(000F00,98,_,_,_,_,_,_ ), 0 , 4 , 0 , 2665 , 168, 60 ), // #694
+ INST(Setz , X86Set , O(000F00,94,_,_,_,_,_,_ ), 0 , 4 , 0 , 2670 , 168, 56 ), // #695
+ INST(Sfence , X86Fence , O(000F00,AE,7,_,_,_,_,_ ), 0 , 22 , 0 , 2675 , 30 , 71 ), // #696
+ INST(Sgdt , X86M_Only , O(000F00,01,0,_,_,_,_,_ ), 0 , 4 , 0 , 2682 , 31 , 0 ), // #697
+ INST(Sha1msg1 , ExtRm , O(000F38,C9,_,_,_,_,_,_ ), 0 , 76 , 0 , 2687 , 5 , 103), // #698
+ INST(Sha1msg2 , ExtRm , O(000F38,CA,_,_,_,_,_,_ ), 0 , 76 , 0 , 2696 , 5 , 103), // #699
+ INST(Sha1nexte , ExtRm , O(000F38,C8,_,_,_,_,_,_ ), 0 , 76 , 0 , 2705 , 5 , 103), // #700
+ INST(Sha1rnds4 , ExtRmi , O(000F3A,CC,_,_,_,_,_,_ ), 0 , 79 , 0 , 2715 , 8 , 103), // #701
+ INST(Sha256msg1 , ExtRm , O(000F38,CC,_,_,_,_,_,_ ), 0 , 76 , 0 , 2725 , 5 , 103), // #702
+ INST(Sha256msg2 , ExtRm , O(000F38,CD,_,_,_,_,_,_ ), 0 , 76 , 0 , 2736 , 5 , 103), // #703
+ INST(Sha256rnds2 , ExtRm_XMM0 , O(000F38,CB,_,_,_,_,_,_ ), 0 , 76 , 0 , 2747 , 15 , 103), // #704
+ INST(Shl , X86Rot , O(000000,D0,4,_,x,_,_,_ ), 0 , 9 , 0 , 2759 , 160, 1 ), // #705
+ INST(Shld , X86ShldShrd , O(000F00,A4,_,_,x,_,_,_ ), 0 , 4 , 0 , 8603 , 169, 1 ), // #706
+ INST(Shlx , VexRmv_Wx , V(660F38,F7,_,0,x,_,_,_ ), 0 , 88 , 0 , 2763 , 13 , 78 ), // #707
+ INST(Shr , X86Rot , O(000000,D0,5,_,x,_,_,_ ), 0 , 58 , 0 , 2768 , 160, 1 ), // #708
+ INST(Shrd , X86ShldShrd , O(000F00,AC,_,_,x,_,_,_ ), 0 , 4 , 0 , 2772 , 169, 1 ), // #709
+ INST(Shrx , VexRmv_Wx , V(F20F38,F7,_,0,x,_,_,_ ), 0 , 77 , 0 , 2777 , 13 , 78 ), // #710
+ INST(Shufpd , ExtRmi , O(660F00,C6,_,_,_,_,_,_ ), 0 , 3 , 0 , 9828 , 8 , 4 ), // #711
+ INST(Shufps , ExtRmi , O(000F00,C6,_,_,_,_,_,_ ), 0 , 4 , 0 , 9836 , 8 , 5 ), // #712
+ INST(Sidt , X86M_Only , O(000F00,01,1,_,_,_,_,_ ), 0 , 27 , 0 , 2782 , 31 , 0 ), // #713
+ INST(Skinit , X86Op_xAX , O(000F01,DE,_,_,_,_,_,_ ), 0 , 21 , 0 , 2787 , 50 , 104), // #714
+ INST(Sldt , X86M , O(000F00,00,0,_,_,_,_,_ ), 0 , 4 , 0 , 2794 , 170, 0 ), // #715
+ INST(Slwpcb , VexR_Wx , V(XOP_M9,12,1,0,x,_,_,_ ), 0 , 11 , 0 , 2799 , 98 , 68 ), // #716
+ INST(Smsw , X86M , O(000F00,01,4,_,_,_,_,_ ), 0 , 89 , 0 , 2806 , 170, 0 ), // #717
+ INST(Sqrtpd , ExtRm , O(660F00,51,_,_,_,_,_,_ ), 0 , 3 , 0 , 9844 , 5 , 4 ), // #718
+ INST(Sqrtps , ExtRm , O(000F00,51,_,_,_,_,_,_ ), 0 , 4 , 0 , 9559 , 5 , 5 ), // #719
+ INST(Sqrtsd , ExtRm , O(F20F00,51,_,_,_,_,_,_ ), 0 , 5 , 0 , 9860 , 6 , 4 ), // #720
+ INST(Sqrtss , ExtRm , O(F30F00,51,_,_,_,_,_,_ ), 0 , 6 , 0 , 9568 , 7 , 5 ), // #721
+ INST(Stac , X86Op , O(000F01,CB,_,_,_,_,_,_ ), 0 , 21 , 0 , 2811 , 30 , 16 ), // #722
+ INST(Stc , X86Op , O(000000,F9,_,_,_,_,_,_ ), 0 , 0 , 0 , 2816 , 30 , 17 ), // #723
+ INST(Std , X86Op , O(000000,FD,_,_,_,_,_,_ ), 0 , 0 , 0 , 6586 , 30 , 18 ), // #724
+ INST(Stgi , X86Op , O(000F01,DC,_,_,_,_,_,_ ), 0 , 21 , 0 , 2820 , 30 , 104), // #725
+ INST(Sti , X86Op , O(000000,FB,_,_,_,_,_,_ ), 0 , 0 , 0 , 2825 , 30 , 23 ), // #726
+ INST(Stmxcsr , X86M_Only , O(000F00,AE,3,_,_,_,_,_ ), 0 , 71 , 0 , 9876 , 93 , 5 ), // #727
+ INST(Stos , X86StrMr , O(000000,AA,_,_,_,_,_,_ ), 0 , 0 , 0 , 2829 , 171, 69 ), // #728
+ INST(Str , X86M , O(000F00,00,1,_,_,_,_,_ ), 0 , 27 , 0 , 2834 , 170, 0 ), // #729
+ INST(Sub , X86Arith , O(000000,28,5,_,x,_,_,_ ), 0 , 58 , 0 , 836 , 166, 1 ), // #730
+ INST(Subpd , ExtRm , O(660F00,5C,_,_,_,_,_,_ ), 0 , 3 , 0 , 4556 , 5 , 4 ), // #731
+ INST(Subps , ExtRm , O(000F00,5C,_,_,_,_,_,_ ), 0 , 4 , 0 , 4568 , 5 , 5 ), // #732
+ INST(Subsd , ExtRm , O(F20F00,5C,_,_,_,_,_,_ ), 0 , 5 , 0 , 5244 , 6 , 4 ), // #733
+ INST(Subss , ExtRm , O(F30F00,5C,_,_,_,_,_,_ ), 0 , 6 , 0 , 5254 , 7 , 5 ), // #734
+ INST(Swapgs , X86Op , O(000F01,F8,_,_,_,_,_,_ ), 0 , 21 , 0 , 2838 , 155, 0 ), // #735
+ INST(Syscall , X86Op , O(000F00,05,_,_,_,_,_,_ ), 0 , 4 , 0 , 2845 , 155, 0 ), // #736
+ INST(Sysenter , X86Op , O(000F00,34,_,_,_,_,_,_ ), 0 , 4 , 0 , 2853 , 30 , 0 ), // #737
+ INST(Sysexit , X86Op , O(000F00,35,_,_,_,_,_,_ ), 0 , 4 , 0 , 2862 , 30 , 0 ), // #738
+ INST(Sysexit64 , X86Op , O(000F00,35,_,_,_,_,_,_ ), 0 , 4 , 0 , 2870 , 30 , 0 ), // #739
+ INST(Sysret , X86Op , O(000F00,07,_,_,_,_,_,_ ), 0 , 4 , 0 , 2880 , 155, 0 ), // #740
+ INST(Sysret64 , X86Op , O(000F00,07,_,_,_,_,_,_ ), 0 , 4 , 0 , 2887 , 155, 0 ), // #741
+ INST(T1mskc , VexVm_Wx , V(XOP_M9,01,7,0,x,_,_,_ ), 0 , 90 , 0 , 2896 , 14 , 11 ), // #742
+ INST(Test , X86Test , O(000000,84,_,_,x,_,_,_ ), O(000000,F6,_,_,x,_,_,_ ), 0 , 78 , 9032 , 172, 1 ), // #743
+ INST(Tzcnt , X86Rm_Raw66H , O(F30F00,BC,_,_,x,_,_,_ ), 0 , 6 , 0 , 2903 , 22 , 9 ), // #744
+ INST(Tzmsk , VexVm_Wx , V(XOP_M9,01,4,0,x,_,_,_ ), 0 , 91 , 0 , 2909 , 14 , 11 ), // #745
+ INST(Ucomisd , ExtRm , O(660F00,2E,_,_,_,_,_,_ ), 0 , 3 , 0 , 9929 , 6 , 39 ), // #746
+ INST(Ucomiss , ExtRm , O(000F00,2E,_,_,_,_,_,_ ), 0 , 4 , 0 , 9938 , 7 , 40 ), // #747
+ INST(Ud2 , X86Op , O(000F00,0B,_,_,_,_,_,_ ), 0 , 4 , 0 , 2915 , 30 , 0 ), // #748
+ INST(Unpckhpd , ExtRm , O(660F00,15,_,_,_,_,_,_ ), 0 , 3 , 0 , 9947 , 5 , 4 ), // #749
+ INST(Unpckhps , ExtRm , O(000F00,15,_,_,_,_,_,_ ), 0 , 4 , 0 , 9957 , 5 , 5 ), // #750
+ INST(Unpcklpd , ExtRm , O(660F00,14,_,_,_,_,_,_ ), 0 , 3 , 0 , 9967 , 5 , 4 ), // #751
+ INST(Unpcklps , ExtRm , O(000F00,14,_,_,_,_,_,_ ), 0 , 4 , 0 , 9977 , 5 , 5 ), // #752
+ INST(V4fmaddps , VexRm_T1_4X , E(F20F38,9A,_,2,_,0,2,T4X), 0 , 92 , 0 , 2919 , 173, 105), // #753
+ INST(V4fmaddss , VexRm_T1_4X , E(F20F38,9B,_,2,_,0,2,T4X), 0 , 92 , 0 , 2929 , 174, 105), // #754
+ INST(V4fnmaddps , VexRm_T1_4X , E(F20F38,AA,_,2,_,0,2,T4X), 0 , 92 , 0 , 2939 , 173, 105), // #755
+ INST(V4fnmaddss , VexRm_T1_4X , E(F20F38,AB,_,2,_,0,2,T4X), 0 , 92 , 0 , 2950 , 174, 105), // #756
+ INST(Vaddpd , VexRvm_Lx , V(660F00,58,_,x,I,1,4,FV ), 0 , 93 , 0 , 2961 , 175, 106), // #757
+ INST(Vaddps , VexRvm_Lx , V(000F00,58,_,x,I,0,4,FV ), 0 , 94 , 0 , 2968 , 176, 106), // #758
+ INST(Vaddsd , VexRvm , V(F20F00,58,_,I,I,1,3,T1S), 0 , 95 , 0 , 2975 , 177, 107), // #759
+ INST(Vaddss , VexRvm , V(F30F00,58,_,I,I,0,2,T1S), 0 , 96 , 0 , 2982 , 178, 107), // #760
+ INST(Vaddsubpd , VexRvm_Lx , V(660F00,D0,_,x,I,_,_,_ ), 0 , 63 , 0 , 2989 , 179, 108), // #761
+ INST(Vaddsubps , VexRvm_Lx , V(F20F00,D0,_,x,I,_,_,_ ), 0 , 97 , 0 , 2999 , 179, 108), // #762
+ INST(Vaesdec , VexRvm_Lx , V(660F38,DE,_,x,I,_,4,FVM), 0 , 98 , 0 , 3009 , 180, 109), // #763
+ INST(Vaesdeclast , VexRvm_Lx , V(660F38,DF,_,x,I,_,4,FVM), 0 , 98 , 0 , 3017 , 180, 109), // #764
+ INST(Vaesenc , VexRvm_Lx , V(660F38,DC,_,x,I,_,4,FVM), 0 , 98 , 0 , 3029 , 180, 109), // #765
+ INST(Vaesenclast , VexRvm_Lx , V(660F38,DD,_,x,I,_,4,FVM), 0 , 98 , 0 , 3037 , 180, 109), // #766
+ INST(Vaesimc , VexRm , V(660F38,DB,_,0,I,_,_,_ ), 0 , 88 , 0 , 3049 , 181, 110), // #767
+ INST(Vaeskeygenassist , VexRmi , V(660F3A,DF,_,0,I,_,_,_ ), 0 , 67 , 0 , 3057 , 182, 110), // #768
+ INST(Valignd , VexRvmi_Lx , E(660F3A,03,_,x,_,0,4,FV ), 0 , 99 , 0 , 3074 , 183, 111), // #769
+ INST(Valignq , VexRvmi_Lx , E(660F3A,03,_,x,_,1,4,FV ), 0 , 100, 0 , 3082 , 184, 111), // #770
+ INST(Vandnpd , VexRvm_Lx , V(660F00,55,_,x,I,1,4,FV ), 0 , 93 , 0 , 3090 , 185, 112), // #771
+ INST(Vandnps , VexRvm_Lx , V(000F00,55,_,x,I,0,4,FV ), 0 , 94 , 0 , 3098 , 186, 112), // #772
+ INST(Vandpd , VexRvm_Lx , V(660F00,54,_,x,I,1,4,FV ), 0 , 93 , 0 , 3106 , 187, 112), // #773
+ INST(Vandps , VexRvm_Lx , V(000F00,54,_,x,I,0,4,FV ), 0 , 94 , 0 , 3113 , 188, 112), // #774
+ INST(Vblendmb , VexRvm_Lx , E(660F38,66,_,x,_,0,4,FVM), 0 , 101, 0 , 3120 , 189, 113), // #775
+ INST(Vblendmd , VexRvm_Lx , E(660F38,64,_,x,_,0,4,FV ), 0 , 102, 0 , 3129 , 190, 111), // #776
+ INST(Vblendmpd , VexRvm_Lx , E(660F38,65,_,x,_,1,4,FV ), 0 , 103, 0 , 3138 , 191, 111), // #777
+ INST(Vblendmps , VexRvm_Lx , E(660F38,65,_,x,_,0,4,FV ), 0 , 102, 0 , 3148 , 190, 111), // #778
+ INST(Vblendmq , VexRvm_Lx , E(660F38,64,_,x,_,1,4,FV ), 0 , 103, 0 , 3158 , 191, 111), // #779
+ INST(Vblendmw , VexRvm_Lx , E(660F38,66,_,x,_,1,4,FVM), 0 , 104, 0 , 3167 , 189, 113), // #780
+ INST(Vblendpd , VexRvmi_Lx , V(660F3A,0D,_,x,I,_,_,_ ), 0 , 67 , 0 , 3176 , 192, 108), // #781
+ INST(Vblendps , VexRvmi_Lx , V(660F3A,0C,_,x,I,_,_,_ ), 0 , 67 , 0 , 3185 , 192, 108), // #782
+ INST(Vblendvpd , VexRvmr_Lx , V(660F3A,4B,_,x,0,_,_,_ ), 0 , 67 , 0 , 3194 , 193, 108), // #783
+ INST(Vblendvps , VexRvmr_Lx , V(660F3A,4A,_,x,0,_,_,_ ), 0 , 67 , 0 , 3204 , 193, 108), // #784
+ INST(Vbroadcastf128 , VexRm , V(660F38,1A,_,1,0,_,_,_ ), 0 , 105, 0 , 3214 , 194, 108), // #785
+ INST(Vbroadcastf32x2 , VexRm_Lx , E(660F38,19,_,x,_,0,3,T2 ), 0 , 106, 0 , 3229 , 195, 114), // #786
+ INST(Vbroadcastf32x4 , VexRm_Lx , E(660F38,1A,_,x,_,0,4,T4 ), 0 , 107, 0 , 3245 , 196, 63 ), // #787
+ INST(Vbroadcastf32x8 , VexRm , E(660F38,1B,_,2,_,0,5,T8 ), 0 , 108, 0 , 3261 , 197, 61 ), // #788
+ INST(Vbroadcastf64x2 , VexRm_Lx , E(660F38,1A,_,x,_,1,4,T2 ), 0 , 109, 0 , 3277 , 196, 114), // #789
+ INST(Vbroadcastf64x4 , VexRm , E(660F38,1B,_,2,_,1,5,T4 ), 0 , 110, 0 , 3293 , 197, 63 ), // #790
+ INST(Vbroadcasti128 , VexRm , V(660F38,5A,_,1,0,_,_,_ ), 0 , 105, 0 , 3309 , 194, 115), // #791
+ INST(Vbroadcasti32x2 , VexRm_Lx , E(660F38,59,_,x,_,0,3,T2 ), 0 , 106, 0 , 3324 , 198, 114), // #792
+ INST(Vbroadcasti32x4 , VexRm_Lx , E(660F38,5A,_,x,_,0,4,T4 ), 0 , 107, 0 , 3340 , 196, 111), // #793
+ INST(Vbroadcasti32x8 , VexRm , E(660F38,5B,_,2,_,0,5,T8 ), 0 , 108, 0 , 3356 , 197, 61 ), // #794
+ INST(Vbroadcasti64x2 , VexRm_Lx , E(660F38,5A,_,x,_,1,4,T2 ), 0 , 109, 0 , 3372 , 196, 114), // #795
+ INST(Vbroadcasti64x4 , VexRm , E(660F38,5B,_,2,_,1,5,T4 ), 0 , 110, 0 , 3388 , 197, 63 ), // #796
+ INST(Vbroadcastsd , VexRm_Lx , V(660F38,19,_,x,0,1,3,T1S), 0 , 111, 0 , 3404 , 199, 116), // #797
+ INST(Vbroadcastss , VexRm_Lx , V(660F38,18,_,x,0,0,2,T1S), 0 , 112, 0 , 3417 , 200, 116), // #798
+ INST(Vcmppd , VexRvmi_Lx , V(660F00,C2,_,x,I,1,4,FV ), 0 , 93 , 0 , 3430 , 201, 106), // #799
+ INST(Vcmpps , VexRvmi_Lx , V(000F00,C2,_,x,I,0,4,FV ), 0 , 94 , 0 , 3437 , 202, 106), // #800
+ INST(Vcmpsd , VexRvmi , V(F20F00,C2,_,I,I,1,3,T1S), 0 , 95 , 0 , 3444 , 203, 107), // #801
+ INST(Vcmpss , VexRvmi , V(F30F00,C2,_,I,I,0,2,T1S), 0 , 96 , 0 , 3451 , 204, 107), // #802
+ INST(Vcomisd , VexRm , V(660F00,2F,_,I,I,1,3,T1S), 0 , 113, 0 , 3458 , 205, 117), // #803
+ INST(Vcomiss , VexRm , V(000F00,2F,_,I,I,0,2,T1S), 0 , 114, 0 , 3466 , 206, 117), // #804
+ INST(Vcompresspd , VexMr_Lx , E(660F38,8A,_,x,_,1,3,T1S), 0 , 115, 0 , 3474 , 207, 111), // #805
+ INST(Vcompressps , VexMr_Lx , E(660F38,8A,_,x,_,0,2,T1S), 0 , 116, 0 , 3486 , 207, 111), // #806
+ INST(Vcvtdq2pd , VexRm_Lx , V(F30F00,E6,_,x,I,0,3,HV ), 0 , 117, 0 , 3498 , 208, 106), // #807
+ INST(Vcvtdq2ps , VexRm_Lx , V(000F00,5B,_,x,I,0,4,FV ), 0 , 94 , 0 , 3508 , 209, 106), // #808
+ INST(Vcvtne2ps2bf16 , VexRvm , E(F20F38,72,_,_,_,0,_,_ ), 0 , 118, 0 , 3518 , 190, 118), // #809
+ INST(Vcvtneps2bf16 , VexRm , E(F30F38,72,_,_,_,0,_,_ ), 0 , 119, 0 , 3533 , 210, 118), // #810
+ INST(Vcvtpd2dq , VexRm_Lx , V(F20F00,E6,_,x,I,1,4,FV ), 0 , 120, 0 , 3547 , 211, 106), // #811
+ INST(Vcvtpd2ps , VexRm_Lx , V(660F00,5A,_,x,I,1,4,FV ), 0 , 93 , 0 , 3557 , 211, 106), // #812
+ INST(Vcvtpd2qq , VexRm_Lx , E(660F00,7B,_,x,_,1,4,FV ), 0 , 121, 0 , 3567 , 212, 114), // #813
+ INST(Vcvtpd2udq , VexRm_Lx , E(000F00,79,_,x,_,1,4,FV ), 0 , 122, 0 , 3577 , 213, 111), // #814
+ INST(Vcvtpd2uqq , VexRm_Lx , E(660F00,79,_,x,_,1,4,FV ), 0 , 121, 0 , 3588 , 212, 114), // #815
+ INST(Vcvtph2ps , VexRm_Lx , V(660F38,13,_,x,0,0,3,HVM), 0 , 123, 0 , 3599 , 214, 119), // #816
+ INST(Vcvtps2dq , VexRm_Lx , V(660F00,5B,_,x,I,0,4,FV ), 0 , 124, 0 , 3609 , 209, 106), // #817
+ INST(Vcvtps2pd , VexRm_Lx , V(000F00,5A,_,x,I,0,4,HV ), 0 , 125, 0 , 3619 , 215, 106), // #818
+ INST(Vcvtps2ph , VexMri_Lx , V(660F3A,1D,_,x,0,0,3,HVM), 0 , 126, 0 , 3629 , 216, 119), // #819
+ INST(Vcvtps2qq , VexRm_Lx , E(660F00,7B,_,x,_,0,3,HV ), 0 , 127, 0 , 3639 , 217, 114), // #820
+ INST(Vcvtps2udq , VexRm_Lx , E(000F00,79,_,x,_,0,4,FV ), 0 , 128, 0 , 3649 , 218, 111), // #821
+ INST(Vcvtps2uqq , VexRm_Lx , E(660F00,79,_,x,_,0,3,HV ), 0 , 127, 0 , 3660 , 217, 114), // #822
+ INST(Vcvtqq2pd , VexRm_Lx , E(F30F00,E6,_,x,_,1,4,FV ), 0 , 129, 0 , 3671 , 212, 114), // #823
+ INST(Vcvtqq2ps , VexRm_Lx , E(000F00,5B,_,x,_,1,4,FV ), 0 , 122, 0 , 3681 , 213, 114), // #824
+ INST(Vcvtsd2si , VexRm_Wx , V(F20F00,2D,_,I,x,x,3,T1F), 0 , 130, 0 , 3691 , 219, 107), // #825
+ INST(Vcvtsd2ss , VexRvm , V(F20F00,5A,_,I,I,1,3,T1S), 0 , 95 , 0 , 3701 , 177, 107), // #826
+ INST(Vcvtsd2usi , VexRm_Wx , E(F20F00,79,_,I,_,x,3,T1F), 0 , 131, 0 , 3711 , 220, 63 ), // #827
+ INST(Vcvtsi2sd , VexRvm_Wx , V(F20F00,2A,_,I,x,x,2,T1W), 0 , 132, 0 , 3722 , 221, 107), // #828
+ INST(Vcvtsi2ss , VexRvm_Wx , V(F30F00,2A,_,I,x,x,2,T1W), 0 , 133, 0 , 3732 , 221, 107), // #829
+ INST(Vcvtss2sd , VexRvm , V(F30F00,5A,_,I,I,0,2,T1S), 0 , 96 , 0 , 3742 , 222, 107), // #830
+ INST(Vcvtss2si , VexRm_Wx , V(F30F00,2D,_,I,x,x,2,T1F), 0 , 134, 0 , 3752 , 223, 107), // #831
+ INST(Vcvtss2usi , VexRm_Wx , E(F30F00,79,_,I,_,x,2,T1F), 0 , 135, 0 , 3762 , 224, 63 ), // #832
+ INST(Vcvttpd2dq , VexRm_Lx , V(660F00,E6,_,x,I,1,4,FV ), 0 , 93 , 0 , 3773 , 225, 106), // #833
+ INST(Vcvttpd2qq , VexRm_Lx , E(660F00,7A,_,x,_,1,4,FV ), 0 , 121, 0 , 3784 , 226, 111), // #834
+ INST(Vcvttpd2udq , VexRm_Lx , E(000F00,78,_,x,_,1,4,FV ), 0 , 122, 0 , 3795 , 227, 111), // #835
+ INST(Vcvttpd2uqq , VexRm_Lx , E(660F00,78,_,x,_,1,4,FV ), 0 , 121, 0 , 3807 , 226, 114), // #836
+ INST(Vcvttps2dq , VexRm_Lx , V(F30F00,5B,_,x,I,0,4,FV ), 0 , 136, 0 , 3819 , 228, 106), // #837
+ INST(Vcvttps2qq , VexRm_Lx , E(660F00,7A,_,x,_,0,3,HV ), 0 , 127, 0 , 3830 , 229, 114), // #838
+ INST(Vcvttps2udq , VexRm_Lx , E(000F00,78,_,x,_,0,4,FV ), 0 , 128, 0 , 3841 , 230, 111), // #839
+ INST(Vcvttps2uqq , VexRm_Lx , E(660F00,78,_,x,_,0,3,HV ), 0 , 127, 0 , 3853 , 229, 114), // #840
+ INST(Vcvttsd2si , VexRm_Wx , V(F20F00,2C,_,I,x,x,3,T1F), 0 , 130, 0 , 3865 , 231, 107), // #841
+ INST(Vcvttsd2usi , VexRm_Wx , E(F20F00,78,_,I,_,x,3,T1F), 0 , 131, 0 , 3876 , 232, 63 ), // #842
+ INST(Vcvttss2si , VexRm_Wx , V(F30F00,2C,_,I,x,x,2,T1F), 0 , 134, 0 , 3888 , 233, 107), // #843
+ INST(Vcvttss2usi , VexRm_Wx , E(F30F00,78,_,I,_,x,2,T1F), 0 , 135, 0 , 3899 , 234, 63 ), // #844
+ INST(Vcvtudq2pd , VexRm_Lx , E(F30F00,7A,_,x,_,0,3,HV ), 0 , 137, 0 , 3911 , 235, 111), // #845
+ INST(Vcvtudq2ps , VexRm_Lx , E(F20F00,7A,_,x,_,0,4,FV ), 0 , 138, 0 , 3922 , 218, 111), // #846
+ INST(Vcvtuqq2pd , VexRm_Lx , E(F30F00,7A,_,x,_,1,4,FV ), 0 , 129, 0 , 3933 , 212, 114), // #847
+ INST(Vcvtuqq2ps , VexRm_Lx , E(F20F00,7A,_,x,_,1,4,FV ), 0 , 139, 0 , 3944 , 213, 114), // #848
+ INST(Vcvtusi2sd , VexRvm_Wx , E(F20F00,7B,_,I,_,x,2,T1W), 0 , 140, 0 , 3955 , 236, 63 ), // #849
+ INST(Vcvtusi2ss , VexRvm_Wx , E(F30F00,7B,_,I,_,x,2,T1W), 0 , 141, 0 , 3966 , 236, 63 ), // #850
+ INST(Vdbpsadbw , VexRvmi_Lx , E(660F3A,42,_,x,_,0,4,FVM), 0 , 142, 0 , 3977 , 237, 113), // #851
+ INST(Vdivpd , VexRvm_Lx , V(660F00,5E,_,x,I,1,4,FV ), 0 , 93 , 0 , 3987 , 175, 106), // #852
+ INST(Vdivps , VexRvm_Lx , V(000F00,5E,_,x,I,0,4,FV ), 0 , 94 , 0 , 3994 , 176, 106), // #853
+ INST(Vdivsd , VexRvm , V(F20F00,5E,_,I,I,1,3,T1S), 0 , 95 , 0 , 4001 , 177, 107), // #854
+ INST(Vdivss , VexRvm , V(F30F00,5E,_,I,I,0,2,T1S), 0 , 96 , 0 , 4008 , 178, 107), // #855
+ INST(Vdpbf16ps , VexRvm , E(F30F38,52,_,_,_,0,_,_ ), 0 , 119, 0 , 4015 , 190, 118), // #856
+ INST(Vdppd , VexRvmi_Lx , V(660F3A,41,_,x,I,_,_,_ ), 0 , 67 , 0 , 4025 , 238, 108), // #857
+ INST(Vdpps , VexRvmi_Lx , V(660F3A,40,_,x,I,_,_,_ ), 0 , 67 , 0 , 4031 , 192, 108), // #858
+ INST(Verr , X86M_NoSize , O(000F00,00,4,_,_,_,_,_ ), 0 , 89 , 0 , 4037 , 97 , 10 ), // #859
+ INST(Verw , X86M_NoSize , O(000F00,00,5,_,_,_,_,_ ), 0 , 70 , 0 , 4042 , 97 , 10 ), // #860
+ INST(Vexp2pd , VexRm , E(660F38,C8,_,2,_,1,4,FV ), 0 , 143, 0 , 4047 , 239, 120), // #861
+ INST(Vexp2ps , VexRm , E(660F38,C8,_,2,_,0,4,FV ), 0 , 144, 0 , 4055 , 240, 120), // #862
+ INST(Vexpandpd , VexRm_Lx , E(660F38,88,_,x,_,1,3,T1S), 0 , 115, 0 , 4063 , 241, 111), // #863
+ INST(Vexpandps , VexRm_Lx , E(660F38,88,_,x,_,0,2,T1S), 0 , 116, 0 , 4073 , 241, 111), // #864
+ INST(Vextractf128 , VexMri , V(660F3A,19,_,1,0,_,_,_ ), 0 , 145, 0 , 4083 , 242, 108), // #865
+ INST(Vextractf32x4 , VexMri_Lx , E(660F3A,19,_,x,_,0,4,T4 ), 0 , 146, 0 , 4096 , 243, 111), // #866
+ INST(Vextractf32x8 , VexMri , E(660F3A,1B,_,2,_,0,5,T8 ), 0 , 147, 0 , 4110 , 244, 61 ), // #867
+ INST(Vextractf64x2 , VexMri_Lx , E(660F3A,19,_,x,_,1,4,T2 ), 0 , 148, 0 , 4124 , 243, 114), // #868
+ INST(Vextractf64x4 , VexMri , E(660F3A,1B,_,2,_,1,5,T4 ), 0 , 149, 0 , 4138 , 244, 63 ), // #869
+ INST(Vextracti128 , VexMri , V(660F3A,39,_,1,0,_,_,_ ), 0 , 145, 0 , 4152 , 242, 115), // #870
+ INST(Vextracti32x4 , VexMri_Lx , E(660F3A,39,_,x,_,0,4,T4 ), 0 , 146, 0 , 4165 , 243, 111), // #871
+ INST(Vextracti32x8 , VexMri , E(660F3A,3B,_,2,_,0,5,T8 ), 0 , 147, 0 , 4179 , 244, 61 ), // #872
+ INST(Vextracti64x2 , VexMri_Lx , E(660F3A,39,_,x,_,1,4,T2 ), 0 , 148, 0 , 4193 , 243, 114), // #873
+ INST(Vextracti64x4 , VexMri , E(660F3A,3B,_,2,_,1,5,T4 ), 0 , 149, 0 , 4207 , 244, 63 ), // #874
+ INST(Vextractps , VexMri , V(660F3A,17,_,0,I,I,2,T1S), 0 , 150, 0 , 4221 , 245, 107), // #875
+ INST(Vfixupimmpd , VexRvmi_Lx , E(660F3A,54,_,x,_,1,4,FV ), 0 , 100, 0 , 4232 , 246, 111), // #876
+ INST(Vfixupimmps , VexRvmi_Lx , E(660F3A,54,_,x,_,0,4,FV ), 0 , 99 , 0 , 4244 , 247, 111), // #877
+ INST(Vfixupimmsd , VexRvmi , E(660F3A,55,_,I,_,1,3,T1S), 0 , 151, 0 , 4256 , 248, 63 ), // #878
+ INST(Vfixupimmss , VexRvmi , E(660F3A,55,_,I,_,0,2,T1S), 0 , 152, 0 , 4268 , 249, 63 ), // #879
+ INST(Vfmadd132pd , VexRvm_Lx , V(660F38,98,_,x,1,1,4,FV ), 0 , 153, 0 , 4280 , 175, 121), // #880
+ INST(Vfmadd132ps , VexRvm_Lx , V(660F38,98,_,x,0,0,4,FV ), 0 , 154, 0 , 4292 , 176, 121), // #881
+ INST(Vfmadd132sd , VexRvm , V(660F38,99,_,I,1,1,3,T1S), 0 , 155, 0 , 4304 , 177, 122), // #882
+ INST(Vfmadd132ss , VexRvm , V(660F38,99,_,I,0,0,2,T1S), 0 , 112, 0 , 4316 , 178, 122), // #883
+ INST(Vfmadd213pd , VexRvm_Lx , V(660F38,A8,_,x,1,1,4,FV ), 0 , 153, 0 , 4328 , 175, 121), // #884
+ INST(Vfmadd213ps , VexRvm_Lx , V(660F38,A8,_,x,0,0,4,FV ), 0 , 154, 0 , 4340 , 176, 121), // #885
+ INST(Vfmadd213sd , VexRvm , V(660F38,A9,_,I,1,1,3,T1S), 0 , 155, 0 , 4352 , 177, 122), // #886
+ INST(Vfmadd213ss , VexRvm , V(660F38,A9,_,I,0,0,2,T1S), 0 , 112, 0 , 4364 , 178, 122), // #887
+ INST(Vfmadd231pd , VexRvm_Lx , V(660F38,B8,_,x,1,1,4,FV ), 0 , 153, 0 , 4376 , 175, 121), // #888
+ INST(Vfmadd231ps , VexRvm_Lx , V(660F38,B8,_,x,0,0,4,FV ), 0 , 154, 0 , 4388 , 176, 121), // #889
+ INST(Vfmadd231sd , VexRvm , V(660F38,B9,_,I,1,1,3,T1S), 0 , 155, 0 , 4400 , 177, 122), // #890
+ INST(Vfmadd231ss , VexRvm , V(660F38,B9,_,I,0,0,2,T1S), 0 , 112, 0 , 4412 , 178, 122), // #891
+ INST(Vfmaddpd , Fma4_Lx , V(660F3A,69,_,x,x,_,_,_ ), 0 , 67 , 0 , 4424 , 250, 123), // #892
+ INST(Vfmaddps , Fma4_Lx , V(660F3A,68,_,x,x,_,_,_ ), 0 , 67 , 0 , 4433 , 250, 123), // #893
+ INST(Vfmaddsd , Fma4 , V(660F3A,6B,_,0,x,_,_,_ ), 0 , 67 , 0 , 4442 , 251, 123), // #894
+ INST(Vfmaddss , Fma4 , V(660F3A,6A,_,0,x,_,_,_ ), 0 , 67 , 0 , 4451 , 252, 123), // #895
+ INST(Vfmaddsub132pd , VexRvm_Lx , V(660F38,96,_,x,1,1,4,FV ), 0 , 153, 0 , 4460 , 175, 121), // #896
+ INST(Vfmaddsub132ps , VexRvm_Lx , V(660F38,96,_,x,0,0,4,FV ), 0 , 154, 0 , 4475 , 176, 121), // #897
+ INST(Vfmaddsub213pd , VexRvm_Lx , V(660F38,A6,_,x,1,1,4,FV ), 0 , 153, 0 , 4490 , 175, 121), // #898
+ INST(Vfmaddsub213ps , VexRvm_Lx , V(660F38,A6,_,x,0,0,4,FV ), 0 , 154, 0 , 4505 , 176, 121), // #899
+ INST(Vfmaddsub231pd , VexRvm_Lx , V(660F38,B6,_,x,1,1,4,FV ), 0 , 153, 0 , 4520 , 175, 121), // #900
+ INST(Vfmaddsub231ps , VexRvm_Lx , V(660F38,B6,_,x,0,0,4,FV ), 0 , 154, 0 , 4535 , 176, 121), // #901
+ INST(Vfmaddsubpd , Fma4_Lx , V(660F3A,5D,_,x,x,_,_,_ ), 0 , 67 , 0 , 4550 , 250, 123), // #902
+ INST(Vfmaddsubps , Fma4_Lx , V(660F3A,5C,_,x,x,_,_,_ ), 0 , 67 , 0 , 4562 , 250, 123), // #903
+ INST(Vfmsub132pd , VexRvm_Lx , V(660F38,9A,_,x,1,1,4,FV ), 0 , 153, 0 , 4574 , 175, 121), // #904
+ INST(Vfmsub132ps , VexRvm_Lx , V(660F38,9A,_,x,0,0,4,FV ), 0 , 154, 0 , 4586 , 176, 121), // #905
+ INST(Vfmsub132sd , VexRvm , V(660F38,9B,_,I,1,1,3,T1S), 0 , 155, 0 , 4598 , 177, 122), // #906
+ INST(Vfmsub132ss , VexRvm , V(660F38,9B,_,I,0,0,2,T1S), 0 , 112, 0 , 4610 , 178, 122), // #907
+ INST(Vfmsub213pd , VexRvm_Lx , V(660F38,AA,_,x,1,1,4,FV ), 0 , 153, 0 , 4622 , 175, 121), // #908
+ INST(Vfmsub213ps , VexRvm_Lx , V(660F38,AA,_,x,0,0,4,FV ), 0 , 154, 0 , 4634 , 176, 121), // #909
+ INST(Vfmsub213sd , VexRvm , V(660F38,AB,_,I,1,1,3,T1S), 0 , 155, 0 , 4646 , 177, 122), // #910
+ INST(Vfmsub213ss , VexRvm , V(660F38,AB,_,I,0,0,2,T1S), 0 , 112, 0 , 4658 , 178, 122), // #911
+ INST(Vfmsub231pd , VexRvm_Lx , V(660F38,BA,_,x,1,1,4,FV ), 0 , 153, 0 , 4670 , 175, 121), // #912
+ INST(Vfmsub231ps , VexRvm_Lx , V(660F38,BA,_,x,0,0,4,FV ), 0 , 154, 0 , 4682 , 176, 121), // #913
+ INST(Vfmsub231sd , VexRvm , V(660F38,BB,_,I,1,1,3,T1S), 0 , 155, 0 , 4694 , 177, 122), // #914
+ INST(Vfmsub231ss , VexRvm , V(660F38,BB,_,I,0,0,2,T1S), 0 , 112, 0 , 4706 , 178, 122), // #915
+ INST(Vfmsubadd132pd , VexRvm_Lx , V(660F38,97,_,x,1,1,4,FV ), 0 , 153, 0 , 4718 , 175, 121), // #916
+ INST(Vfmsubadd132ps , VexRvm_Lx , V(660F38,97,_,x,0,0,4,FV ), 0 , 154, 0 , 4733 , 176, 121), // #917
+ INST(Vfmsubadd213pd , VexRvm_Lx , V(660F38,A7,_,x,1,1,4,FV ), 0 , 153, 0 , 4748 , 175, 121), // #918
+ INST(Vfmsubadd213ps , VexRvm_Lx , V(660F38,A7,_,x,0,0,4,FV ), 0 , 154, 0 , 4763 , 176, 121), // #919
+ INST(Vfmsubadd231pd , VexRvm_Lx , V(660F38,B7,_,x,1,1,4,FV ), 0 , 153, 0 , 4778 , 175, 121), // #920
+ INST(Vfmsubadd231ps , VexRvm_Lx , V(660F38,B7,_,x,0,0,4,FV ), 0 , 154, 0 , 4793 , 176, 121), // #921
+ INST(Vfmsubaddpd , Fma4_Lx , V(660F3A,5F,_,x,x,_,_,_ ), 0 , 67 , 0 , 4808 , 250, 123), // #922
+ INST(Vfmsubaddps , Fma4_Lx , V(660F3A,5E,_,x,x,_,_,_ ), 0 , 67 , 0 , 4820 , 250, 123), // #923
+ INST(Vfmsubpd , Fma4_Lx , V(660F3A,6D,_,x,x,_,_,_ ), 0 , 67 , 0 , 4832 , 250, 123), // #924
+ INST(Vfmsubps , Fma4_Lx , V(660F3A,6C,_,x,x,_,_,_ ), 0 , 67 , 0 , 4841 , 250, 123), // #925
+ INST(Vfmsubsd , Fma4 , V(660F3A,6F,_,0,x,_,_,_ ), 0 , 67 , 0 , 4850 , 251, 123), // #926
+ INST(Vfmsubss , Fma4 , V(660F3A,6E,_,0,x,_,_,_ ), 0 , 67 , 0 , 4859 , 252, 123), // #927
+ INST(Vfnmadd132pd , VexRvm_Lx , V(660F38,9C,_,x,1,1,4,FV ), 0 , 153, 0 , 4868 , 175, 121), // #928
+ INST(Vfnmadd132ps , VexRvm_Lx , V(660F38,9C,_,x,0,0,4,FV ), 0 , 154, 0 , 4881 , 176, 121), // #929
+ INST(Vfnmadd132sd , VexRvm , V(660F38,9D,_,I,1,1,3,T1S), 0 , 155, 0 , 4894 , 177, 122), // #930
+ INST(Vfnmadd132ss , VexRvm , V(660F38,9D,_,I,0,0,2,T1S), 0 , 112, 0 , 4907 , 178, 122), // #931
+ INST(Vfnmadd213pd , VexRvm_Lx , V(660F38,AC,_,x,1,1,4,FV ), 0 , 153, 0 , 4920 , 175, 121), // #932
+ INST(Vfnmadd213ps , VexRvm_Lx , V(660F38,AC,_,x,0,0,4,FV ), 0 , 154, 0 , 4933 , 176, 121), // #933
+ INST(Vfnmadd213sd , VexRvm , V(660F38,AD,_,I,1,1,3,T1S), 0 , 155, 0 , 4946 , 177, 122), // #934
+ INST(Vfnmadd213ss , VexRvm , V(660F38,AD,_,I,0,0,2,T1S), 0 , 112, 0 , 4959 , 178, 122), // #935
+ INST(Vfnmadd231pd , VexRvm_Lx , V(660F38,BC,_,x,1,1,4,FV ), 0 , 153, 0 , 4972 , 175, 121), // #936
+ INST(Vfnmadd231ps , VexRvm_Lx , V(660F38,BC,_,x,0,0,4,FV ), 0 , 154, 0 , 4985 , 176, 121), // #937
+ INST(Vfnmadd231sd     , VexRvm             , V(660F38,BD,_,I,1,1,3,T1S), 0                         , 155, 0  , 4998 , 177, 122), // #938
+ INST(Vfnmadd231ss     , VexRvm             , V(660F38,BD,_,I,0,0,2,T1S), 0                         , 112, 0  , 5011 , 178, 122), // #939
+ INST(Vfnmaddpd , Fma4_Lx , V(660F3A,79,_,x,x,_,_,_ ), 0 , 67 , 0 , 5024 , 250, 123), // #940
+ INST(Vfnmaddps , Fma4_Lx , V(660F3A,78,_,x,x,_,_,_ ), 0 , 67 , 0 , 5034 , 250, 123), // #941
+ INST(Vfnmaddsd , Fma4 , V(660F3A,7B,_,0,x,_,_,_ ), 0 , 67 , 0 , 5044 , 251, 123), // #942
+ INST(Vfnmaddss , Fma4 , V(660F3A,7A,_,0,x,_,_,_ ), 0 , 67 , 0 , 5054 , 252, 123), // #943
+ INST(Vfnmsub132pd , VexRvm_Lx , V(660F38,9E,_,x,1,1,4,FV ), 0 , 153, 0 , 5064 , 175, 121), // #944
+ INST(Vfnmsub132ps , VexRvm_Lx , V(660F38,9E,_,x,0,0,4,FV ), 0 , 154, 0 , 5077 , 176, 121), // #945
+ INST(Vfnmsub132sd , VexRvm , V(660F38,9F,_,I,1,1,3,T1S), 0 , 155, 0 , 5090 , 177, 122), // #946
+ INST(Vfnmsub132ss , VexRvm , V(660F38,9F,_,I,0,0,2,T1S), 0 , 112, 0 , 5103 , 178, 122), // #947
+ INST(Vfnmsub213pd , VexRvm_Lx , V(660F38,AE,_,x,1,1,4,FV ), 0 , 153, 0 , 5116 , 175, 121), // #948
+ INST(Vfnmsub213ps , VexRvm_Lx , V(660F38,AE,_,x,0,0,4,FV ), 0 , 154, 0 , 5129 , 176, 121), // #949
+ INST(Vfnmsub213sd , VexRvm , V(660F38,AF,_,I,1,1,3,T1S), 0 , 155, 0 , 5142 , 177, 122), // #950
+ INST(Vfnmsub213ss , VexRvm , V(660F38,AF,_,I,0,0,2,T1S), 0 , 112, 0 , 5155 , 178, 122), // #951
+ INST(Vfnmsub231pd , VexRvm_Lx , V(660F38,BE,_,x,1,1,4,FV ), 0 , 153, 0 , 5168 , 175, 121), // #952
+ INST(Vfnmsub231ps , VexRvm_Lx , V(660F38,BE,_,x,0,0,4,FV ), 0 , 154, 0 , 5181 , 176, 121), // #953
+ INST(Vfnmsub231sd , VexRvm , V(660F38,BF,_,I,1,1,3,T1S), 0 , 155, 0 , 5194 , 177, 122), // #954
+ INST(Vfnmsub231ss , VexRvm , V(660F38,BF,_,I,0,0,2,T1S), 0 , 112, 0 , 5207 , 178, 122), // #955
+ INST(Vfnmsubpd , Fma4_Lx , V(660F3A,7D,_,x,x,_,_,_ ), 0 , 67 , 0 , 5220 , 250, 123), // #956
+ INST(Vfnmsubps , Fma4_Lx , V(660F3A,7C,_,x,x,_,_,_ ), 0 , 67 , 0 , 5230 , 250, 123), // #957
+ INST(Vfnmsubsd , Fma4 , V(660F3A,7F,_,0,x,_,_,_ ), 0 , 67 , 0 , 5240 , 251, 123), // #958
+ INST(Vfnmsubss , Fma4 , V(660F3A,7E,_,0,x,_,_,_ ), 0 , 67 , 0 , 5250 , 252, 123), // #959
+ INST(Vfpclasspd , VexRmi_Lx , E(660F3A,66,_,x,_,1,4,FV ), 0 , 100, 0 , 5260 , 253, 114), // #960
+ INST(Vfpclassps , VexRmi_Lx , E(660F3A,66,_,x,_,0,4,FV ), 0 , 99 , 0 , 5271 , 254, 114), // #961
+ INST(Vfpclasssd , VexRmi_Lx , E(660F3A,67,_,I,_,1,3,T1S), 0 , 151, 0 , 5282 , 255, 61 ), // #962
+ INST(Vfpclassss , VexRmi_Lx , E(660F3A,67,_,I,_,0,2,T1S), 0 , 152, 0 , 5293 , 256, 61 ), // #963
+ INST(Vfrczpd , VexRm_Lx , V(XOP_M9,81,_,x,0,_,_,_ ), 0 , 72 , 0 , 5304 , 257, 124), // #964
+ INST(Vfrczps , VexRm_Lx , V(XOP_M9,80,_,x,0,_,_,_ ), 0 , 72 , 0 , 5312 , 257, 124), // #965
+ INST(Vfrczsd , VexRm , V(XOP_M9,83,_,0,0,_,_,_ ), 0 , 72 , 0 , 5320 , 258, 124), // #966
+ INST(Vfrczss , VexRm , V(XOP_M9,82,_,0,0,_,_,_ ), 0 , 72 , 0 , 5328 , 259, 124), // #967
+ INST(Vgatherdpd , VexRmvRm_VM , V(660F38,92,_,x,1,_,_,_ ), V(660F38,92,_,x,_,1,3,T1S), 156, 79 , 5336 , 260, 125), // #968
+ INST(Vgatherdps , VexRmvRm_VM , V(660F38,92,_,x,0,_,_,_ ), V(660F38,92,_,x,_,0,2,T1S), 88 , 80 , 5347 , 261, 125), // #969
+ INST(Vgatherpf0dpd , VexM_VM , E(660F38,C6,1,2,_,1,3,T1S), 0 , 157, 0 , 5358 , 262, 126), // #970
+ INST(Vgatherpf0dps , VexM_VM , E(660F38,C6,1,2,_,0,2,T1S), 0 , 158, 0 , 5372 , 263, 126), // #971
+ INST(Vgatherpf0qpd , VexM_VM , E(660F38,C7,1,2,_,1,3,T1S), 0 , 157, 0 , 5386 , 264, 126), // #972
+ INST(Vgatherpf0qps , VexM_VM , E(660F38,C7,1,2,_,0,2,T1S), 0 , 158, 0 , 5400 , 264, 126), // #973
+ INST(Vgatherpf1dpd , VexM_VM , E(660F38,C6,2,2,_,1,3,T1S), 0 , 159, 0 , 5414 , 262, 126), // #974
+ INST(Vgatherpf1dps , VexM_VM , E(660F38,C6,2,2,_,0,2,T1S), 0 , 160, 0 , 5428 , 263, 126), // #975
+ INST(Vgatherpf1qpd , VexM_VM , E(660F38,C7,2,2,_,1,3,T1S), 0 , 159, 0 , 5442 , 264, 126), // #976
+ INST(Vgatherpf1qps , VexM_VM , E(660F38,C7,2,2,_,0,2,T1S), 0 , 160, 0 , 5456 , 264, 126), // #977
+ INST(Vgatherqpd , VexRmvRm_VM , V(660F38,93,_,x,1,_,_,_ ), V(660F38,93,_,x,_,1,3,T1S), 156, 81 , 5470 , 265, 125), // #978
+ INST(Vgatherqps , VexRmvRm_VM , V(660F38,93,_,x,0,_,_,_ ), V(660F38,93,_,x,_,0,2,T1S), 88 , 82 , 5481 , 266, 125), // #979
+ INST(Vgetexppd , VexRm_Lx , E(660F38,42,_,x,_,1,4,FV ), 0 , 103, 0 , 5492 , 226, 111), // #980
+ INST(Vgetexpps , VexRm_Lx , E(660F38,42,_,x,_,0,4,FV ), 0 , 102, 0 , 5502 , 230, 111), // #981
+ INST(Vgetexpsd , VexRvm , E(660F38,43,_,I,_,1,3,T1S), 0 , 115, 0 , 5512 , 267, 63 ), // #982
+ INST(Vgetexpss , VexRvm , E(660F38,43,_,I,_,0,2,T1S), 0 , 116, 0 , 5522 , 268, 63 ), // #983
+ INST(Vgetmantpd , VexRmi_Lx , E(660F3A,26,_,x,_,1,4,FV ), 0 , 100, 0 , 5532 , 269, 111), // #984
+ INST(Vgetmantps , VexRmi_Lx , E(660F3A,26,_,x,_,0,4,FV ), 0 , 99 , 0 , 5543 , 270, 111), // #985
+ INST(Vgetmantsd , VexRvmi , E(660F3A,27,_,I,_,1,3,T1S), 0 , 151, 0 , 5554 , 248, 63 ), // #986
+ INST(Vgetmantss , VexRvmi , E(660F3A,27,_,I,_,0,2,T1S), 0 , 152, 0 , 5565 , 249, 63 ), // #987
+ INST(Vgf2p8affineinvqb, VexRvmi_Lx , V(660F3A,CF,_,x,1,1,4,FV ), 0 , 161, 0 , 5576 , 271, 127), // #988
+ INST(Vgf2p8affineqb , VexRvmi_Lx , V(660F3A,CE,_,x,1,1,4,FV ), 0 , 161, 0 , 5594 , 271, 127), // #989
+ INST(Vgf2p8mulb , VexRvm_Lx , V(660F38,CF,_,x,0,0,4,FV ), 0 , 154, 0 , 5609 , 272, 127), // #990
+ INST(Vhaddpd , VexRvm_Lx , V(660F00,7C,_,x,I,_,_,_ ), 0 , 63 , 0 , 5620 , 179, 108), // #991
+ INST(Vhaddps , VexRvm_Lx , V(F20F00,7C,_,x,I,_,_,_ ), 0 , 97 , 0 , 5628 , 179, 108), // #992
+ INST(Vhsubpd , VexRvm_Lx , V(660F00,7D,_,x,I,_,_,_ ), 0 , 63 , 0 , 5636 , 179, 108), // #993
+ INST(Vhsubps , VexRvm_Lx , V(F20F00,7D,_,x,I,_,_,_ ), 0 , 97 , 0 , 5644 , 179, 108), // #994
+ INST(Vinsertf128 , VexRvmi , V(660F3A,18,_,1,0,_,_,_ ), 0 , 145, 0 , 5652 , 273, 108), // #995
+ INST(Vinsertf32x4 , VexRvmi_Lx , E(660F3A,18,_,x,_,0,4,T4 ), 0 , 146, 0 , 5664 , 274, 111), // #996
+ INST(Vinsertf32x8 , VexRvmi , E(660F3A,1A,_,2,_,0,5,T8 ), 0 , 147, 0 , 5677 , 275, 61 ), // #997
+ INST(Vinsertf64x2 , VexRvmi_Lx , E(660F3A,18,_,x,_,1,4,T2 ), 0 , 148, 0 , 5690 , 274, 114), // #998
+ INST(Vinsertf64x4 , VexRvmi , E(660F3A,1A,_,2,_,1,5,T4 ), 0 , 149, 0 , 5703 , 275, 63 ), // #999
+ INST(Vinserti128 , VexRvmi , V(660F3A,38,_,1,0,_,_,_ ), 0 , 145, 0 , 5716 , 273, 115), // #1000
+ INST(Vinserti32x4 , VexRvmi_Lx , E(660F3A,38,_,x,_,0,4,T4 ), 0 , 146, 0 , 5728 , 274, 111), // #1001
+ INST(Vinserti32x8 , VexRvmi , E(660F3A,3A,_,2,_,0,5,T8 ), 0 , 147, 0 , 5741 , 275, 61 ), // #1002
+ INST(Vinserti64x2 , VexRvmi_Lx , E(660F3A,38,_,x,_,1,4,T2 ), 0 , 148, 0 , 5754 , 274, 114), // #1003
+ INST(Vinserti64x4 , VexRvmi , E(660F3A,3A,_,2,_,1,5,T4 ), 0 , 149, 0 , 5767 , 275, 63 ), // #1004
+ INST(Vinsertps , VexRvmi , V(660F3A,21,_,0,I,0,2,T1S), 0 , 150, 0 , 5780 , 276, 107), // #1005
+ INST(Vlddqu , VexRm_Lx , V(F20F00,F0,_,x,I,_,_,_ ), 0 , 97 , 0 , 5790 , 277, 108), // #1006
+ INST(Vldmxcsr , VexM , V(000F00,AE,2,0,I,_,_,_ ), 0 , 162, 0 , 5797 , 278, 108), // #1007
+ INST(Vmaskmovdqu , VexRm_ZDI , V(660F00,F7,_,0,I,_,_,_ ), 0 , 63 , 0 , 5806 , 279, 108), // #1008
+ INST(Vmaskmovpd , VexRvmMvr_Lx , V(660F38,2D,_,x,0,_,_,_ ), V(660F38,2F,_,x,0,_,_,_ ), 88 , 83 , 5818 , 280, 108), // #1009
+ INST(Vmaskmovps , VexRvmMvr_Lx , V(660F38,2C,_,x,0,_,_,_ ), V(660F38,2E,_,x,0,_,_,_ ), 88 , 84 , 5829 , 280, 108), // #1010
+ INST(Vmaxpd , VexRvm_Lx , V(660F00,5F,_,x,I,1,4,FV ), 0 , 93 , 0 , 5840 , 281, 106), // #1011
+ INST(Vmaxps , VexRvm_Lx , V(000F00,5F,_,x,I,0,4,FV ), 0 , 94 , 0 , 5847 , 282, 106), // #1012
+ INST(Vmaxsd , VexRvm , V(F20F00,5F,_,I,I,1,3,T1S), 0 , 95 , 0 , 5854 , 283, 106), // #1013
+ INST(Vmaxss , VexRvm , V(F30F00,5F,_,I,I,0,2,T1S), 0 , 96 , 0 , 5861 , 222, 106), // #1014
+ INST(Vmcall , X86Op , O(000F01,C1,_,_,_,_,_,_ ), 0 , 21 , 0 , 5868 , 30 , 53 ), // #1015
+ INST(Vmclear , X86M_Only , O(660F00,C7,6,_,_,_,_,_ ), 0 , 24 , 0 , 5875 , 284, 53 ), // #1016
+ INST(Vmfunc , X86Op , O(000F01,D4,_,_,_,_,_,_ ), 0 , 21 , 0 , 5883 , 30 , 53 ), // #1017
+ INST(Vminpd , VexRvm_Lx , V(660F00,5D,_,x,I,1,4,FV ), 0 , 93 , 0 , 5890 , 281, 106), // #1018
+ INST(Vminps , VexRvm_Lx , V(000F00,5D,_,x,I,0,4,FV ), 0 , 94 , 0 , 5897 , 282, 106), // #1019
+ INST(Vminsd , VexRvm , V(F20F00,5D,_,I,I,1,3,T1S), 0 , 95 , 0 , 5904 , 283, 106), // #1020
+ INST(Vminss , VexRvm , V(F30F00,5D,_,I,I,0,2,T1S), 0 , 96 , 0 , 5911 , 222, 106), // #1021
+ INST(Vmlaunch , X86Op , O(000F01,C2,_,_,_,_,_,_ ), 0 , 21 , 0 , 5918 , 30 , 53 ), // #1022
+ INST(Vmload , X86Op_xAX , O(000F01,DA,_,_,_,_,_,_ ), 0 , 21 , 0 , 5927 , 285, 22 ), // #1023
+ INST(Vmmcall , X86Op , O(000F01,D9,_,_,_,_,_,_ ), 0 , 21 , 0 , 5934 , 30 , 22 ), // #1024
+ INST(Vmovapd , VexRmMr_Lx , V(660F00,28,_,x,I,1,4,FVM), V(660F00,29,_,x,I,1,4,FVM), 163, 85 , 5942 , 286, 106), // #1025
+ INST(Vmovaps , VexRmMr_Lx , V(000F00,28,_,x,I,0,4,FVM), V(000F00,29,_,x,I,0,4,FVM), 164, 86 , 5950 , 286, 106), // #1026
+ INST(Vmovd , VexMovdMovq , V(660F00,6E,_,0,0,0,2,T1S), V(660F00,7E,_,0,0,0,2,T1S), 165, 87 , 5958 , 287, 107), // #1027
+ INST(Vmovddup , VexRm_Lx , V(F20F00,12,_,x,I,1,3,DUP), 0 , 166, 0 , 5964 , 288, 106), // #1028
+ INST(Vmovdqa , VexRmMr_Lx , V(660F00,6F,_,x,I,_,_,_ ), V(660F00,7F,_,x,I,_,_,_ ), 63 , 88 , 5973 , 289, 108), // #1029
+ INST(Vmovdqa32 , VexRmMr_Lx , E(660F00,6F,_,x,_,0,4,FVM), E(660F00,7F,_,x,_,0,4,FVM), 167, 89 , 5981 , 290, 111), // #1030
+ INST(Vmovdqa64 , VexRmMr_Lx , E(660F00,6F,_,x,_,1,4,FVM), E(660F00,7F,_,x,_,1,4,FVM), 168, 90 , 5991 , 290, 111), // #1031
+ INST(Vmovdqu , VexRmMr_Lx , V(F30F00,6F,_,x,I,_,_,_ ), V(F30F00,7F,_,x,I,_,_,_ ), 169, 91 , 6001 , 289, 108), // #1032
+ INST(Vmovdqu16 , VexRmMr_Lx , E(F20F00,6F,_,x,_,1,4,FVM), E(F20F00,7F,_,x,_,1,4,FVM), 170, 92 , 6009 , 290, 113), // #1033
+ INST(Vmovdqu32 , VexRmMr_Lx , E(F30F00,6F,_,x,_,0,4,FVM), E(F30F00,7F,_,x,_,0,4,FVM), 171, 93 , 6019 , 290, 111), // #1034
+ INST(Vmovdqu64 , VexRmMr_Lx , E(F30F00,6F,_,x,_,1,4,FVM), E(F30F00,7F,_,x,_,1,4,FVM), 172, 94 , 6029 , 290, 111), // #1035
+ INST(Vmovdqu8 , VexRmMr_Lx , E(F20F00,6F,_,x,_,0,4,FVM), E(F20F00,7F,_,x,_,0,4,FVM), 173, 95 , 6039 , 290, 113), // #1036
+ INST(Vmovhlps , VexRvm , V(000F00,12,_,0,I,0,_,_ ), 0 , 66 , 0 , 6048 , 291, 107), // #1037
+ INST(Vmovhpd , VexRvmMr , V(660F00,16,_,0,I,1,3,T1S), V(660F00,17,_,0,I,1,3,T1S), 113, 96 , 6057 , 292, 107), // #1038
+ INST(Vmovhps , VexRvmMr , V(000F00,16,_,0,I,0,3,T2 ), V(000F00,17,_,0,I,0,3,T2 ), 174, 97 , 6065 , 292, 107), // #1039
+ INST(Vmovlhps , VexRvm , V(000F00,16,_,0,I,0,_,_ ), 0 , 66 , 0 , 6073 , 291, 107), // #1040
+ INST(Vmovlpd , VexRvmMr , V(660F00,12,_,0,I,1,3,T1S), V(660F00,13,_,0,I,1,3,T1S), 113, 98 , 6082 , 292, 107), // #1041
+ INST(Vmovlps , VexRvmMr , V(000F00,12,_,0,I,0,3,T2 ), V(000F00,13,_,0,I,0,3,T2 ), 174, 99 , 6090 , 292, 107), // #1042
+ INST(Vmovmskpd , VexRm_Lx , V(660F00,50,_,x,I,_,_,_ ), 0 , 63 , 0 , 6098 , 293, 108), // #1043
+ INST(Vmovmskps , VexRm_Lx , V(000F00,50,_,x,I,_,_,_ ), 0 , 66 , 0 , 6108 , 293, 108), // #1044
+ INST(Vmovntdq , VexMr_Lx , V(660F00,E7,_,x,I,0,4,FVM), 0 , 175, 0 , 6118 , 294, 106), // #1045
+ INST(Vmovntdqa , VexRm_Lx , V(660F38,2A,_,x,I,0,4,FVM), 0 , 98 , 0 , 6127 , 295, 116), // #1046
+ INST(Vmovntpd , VexMr_Lx , V(660F00,2B,_,x,I,1,4,FVM), 0 , 163, 0 , 6137 , 294, 106), // #1047
+ INST(Vmovntps , VexMr_Lx , V(000F00,2B,_,x,I,0,4,FVM), 0 , 164, 0 , 6146 , 294, 106), // #1048
+ INST(Vmovq , VexMovdMovq , V(660F00,6E,_,0,I,1,3,T1S), V(660F00,7E,_,0,I,1,3,T1S), 113, 100, 6155 , 296, 107), // #1049
+ INST(Vmovsd , VexMovssMovsd , V(F20F00,10,_,I,I,1,3,T1S), V(F20F00,11,_,I,I,1,3,T1S), 95 , 101, 6161 , 297, 107), // #1050
+ INST(Vmovshdup , VexRm_Lx , V(F30F00,16,_,x,I,0,4,FVM), 0 , 176, 0 , 6168 , 298, 106), // #1051
+ INST(Vmovsldup , VexRm_Lx , V(F30F00,12,_,x,I,0,4,FVM), 0 , 176, 0 , 6178 , 298, 106), // #1052
+ INST(Vmovss , VexMovssMovsd , V(F30F00,10,_,I,I,0,2,T1S), V(F30F00,11,_,I,I,0,2,T1S), 96 , 102, 6188 , 299, 107), // #1053
+ INST(Vmovupd , VexRmMr_Lx , V(660F00,10,_,x,I,1,4,FVM), V(660F00,11,_,x,I,1,4,FVM), 163, 103, 6195 , 286, 106), // #1054
+ INST(Vmovups , VexRmMr_Lx , V(000F00,10,_,x,I,0,4,FVM), V(000F00,11,_,x,I,0,4,FVM), 164, 104, 6203 , 286, 106), // #1055
+ INST(Vmpsadbw , VexRvmi_Lx , V(660F3A,42,_,x,I,_,_,_ ), 0 , 67 , 0 , 6211 , 192, 128), // #1056
+ INST(Vmptrld , X86M_Only , O(000F00,C7,6,_,_,_,_,_ ), 0 , 73 , 0 , 6220 , 284, 53 ), // #1057
+ INST(Vmptrst , X86M_Only , O(000F00,C7,7,_,_,_,_,_ ), 0 , 22 , 0 , 6228 , 284, 53 ), // #1058
+ INST(Vmread , X86Mr_NoSize , O(000F00,78,_,_,_,_,_,_ ), 0 , 4 , 0 , 6236 , 300, 53 ), // #1059
+ INST(Vmresume , X86Op , O(000F01,C3,_,_,_,_,_,_ ), 0 , 21 , 0 , 6243 , 30 , 53 ), // #1060
+ INST(Vmrun , X86Op_xAX , O(000F01,D8,_,_,_,_,_,_ ), 0 , 21 , 0 , 6252 , 285, 22 ), // #1061
+ INST(Vmsave , X86Op_xAX , O(000F01,DB,_,_,_,_,_,_ ), 0 , 21 , 0 , 6258 , 285, 22 ), // #1062
+ INST(Vmulpd , VexRvm_Lx , V(660F00,59,_,x,I,1,4,FV ), 0 , 93 , 0 , 6265 , 175, 106), // #1063
+ INST(Vmulps , VexRvm_Lx , V(000F00,59,_,x,I,0,4,FV ), 0 , 94 , 0 , 6272 , 176, 106), // #1064
+ INST(Vmulsd , VexRvm_Lx , V(F20F00,59,_,I,I,1,3,T1S), 0 , 95 , 0 , 6279 , 177, 107), // #1065
+ INST(Vmulss , VexRvm_Lx , V(F30F00,59,_,I,I,0,2,T1S), 0 , 96 , 0 , 6286 , 178, 107), // #1066
+ INST(Vmwrite , X86Rm_NoSize , O(000F00,79,_,_,_,_,_,_ ), 0 , 4 , 0 , 6293 , 301, 53 ), // #1067
+ INST(Vmxon , X86M_Only , O(F30F00,C7,6,_,_,_,_,_ ), 0 , 177, 0 , 6301 , 284, 53 ), // #1068
+ INST(Vorpd , VexRvm_Lx , V(660F00,56,_,x,I,1,4,FV ), 0 , 93 , 0 , 6307 , 187, 112), // #1069
+ INST(Vorps , VexRvm_Lx , V(000F00,56,_,x,I,0,4,FV ), 0 , 94 , 0 , 6313 , 188, 112), // #1070
+ INST(Vp4dpwssd , VexRm_T1_4X , E(F20F38,52,_,2,_,0,2,T4X), 0 , 92 , 0 , 6319 , 173, 129), // #1071
+ INST(Vp4dpwssds , VexRm_T1_4X , E(F20F38,53,_,2,_,0,2,T4X), 0 , 92 , 0 , 6329 , 173, 129), // #1072
+ INST(Vpabsb , VexRm_Lx , V(660F38,1C,_,x,I,_,4,FVM), 0 , 98 , 0 , 6340 , 298, 130), // #1073
+ INST(Vpabsd , VexRm_Lx , V(660F38,1E,_,x,I,0,4,FV ), 0 , 154, 0 , 6347 , 298, 116), // #1074
+ INST(Vpabsq , VexRm_Lx , E(660F38,1F,_,x,_,1,4,FV ), 0 , 103, 0 , 6354 , 241, 111), // #1075
+ INST(Vpabsw , VexRm_Lx , V(660F38,1D,_,x,I,_,4,FVM), 0 , 98 , 0 , 6361 , 298, 130), // #1076
+ INST(Vpackssdw , VexRvm_Lx , V(660F00,6B,_,x,I,0,4,FV ), 0 , 124, 0 , 6368 , 186, 130), // #1077
+ INST(Vpacksswb , VexRvm_Lx , V(660F00,63,_,x,I,I,4,FVM), 0 , 175, 0 , 6378 , 272, 130), // #1078
+ INST(Vpackusdw , VexRvm_Lx , V(660F38,2B,_,x,I,0,4,FV ), 0 , 154, 0 , 6388 , 186, 130), // #1079
+ INST(Vpackuswb , VexRvm_Lx , V(660F00,67,_,x,I,I,4,FVM), 0 , 175, 0 , 6398 , 272, 130), // #1080
+ INST(Vpaddb , VexRvm_Lx , V(660F00,FC,_,x,I,I,4,FVM), 0 , 175, 0 , 6408 , 272, 130), // #1081
+ INST(Vpaddd , VexRvm_Lx , V(660F00,FE,_,x,I,0,4,FV ), 0 , 124, 0 , 6415 , 186, 116), // #1082
+ INST(Vpaddq , VexRvm_Lx , V(660F00,D4,_,x,I,1,4,FV ), 0 , 93 , 0 , 6422 , 185, 116), // #1083
+ INST(Vpaddsb , VexRvm_Lx , V(660F00,EC,_,x,I,I,4,FVM), 0 , 175, 0 , 6429 , 272, 130), // #1084
+ INST(Vpaddsw , VexRvm_Lx , V(660F00,ED,_,x,I,I,4,FVM), 0 , 175, 0 , 6437 , 272, 130), // #1085
+ INST(Vpaddusb , VexRvm_Lx , V(660F00,DC,_,x,I,I,4,FVM), 0 , 175, 0 , 6445 , 272, 130), // #1086
+ INST(Vpaddusw , VexRvm_Lx , V(660F00,DD,_,x,I,I,4,FVM), 0 , 175, 0 , 6454 , 272, 130), // #1087
+ INST(Vpaddw , VexRvm_Lx , V(660F00,FD,_,x,I,I,4,FVM), 0 , 175, 0 , 6463 , 272, 130), // #1088
+ INST(Vpalignr , VexRvmi_Lx , V(660F3A,0F,_,x,I,I,4,FVM), 0 , 178, 0 , 6470 , 271, 130), // #1089
+ INST(Vpand , VexRvm_Lx , V(660F00,DB,_,x,I,_,_,_ ), 0 , 63 , 0 , 6479 , 302, 128), // #1090
+ INST(Vpandd , VexRvm_Lx , E(660F00,DB,_,x,_,0,4,FV ), 0 , 179, 0 , 6485 , 303, 111), // #1091
+ INST(Vpandn , VexRvm_Lx , V(660F00,DF,_,x,I,_,_,_ ), 0 , 63 , 0 , 6492 , 304, 128), // #1092
+ INST(Vpandnd , VexRvm_Lx , E(660F00,DF,_,x,_,0,4,FV ), 0 , 179, 0 , 6499 , 305, 111), // #1093
+ INST(Vpandnq , VexRvm_Lx , E(660F00,DF,_,x,_,1,4,FV ), 0 , 121, 0 , 6507 , 306, 111), // #1094
+ INST(Vpandq , VexRvm_Lx , E(660F00,DB,_,x,_,1,4,FV ), 0 , 121, 0 , 6515 , 307, 111), // #1095
+ INST(Vpavgb , VexRvm_Lx , V(660F00,E0,_,x,I,I,4,FVM), 0 , 175, 0 , 6522 , 272, 130), // #1096
+ INST(Vpavgw , VexRvm_Lx , V(660F00,E3,_,x,I,I,4,FVM), 0 , 175, 0 , 6529 , 272, 130), // #1097
+ INST(Vpblendd , VexRvmi_Lx , V(660F3A,02,_,x,0,_,_,_ ), 0 , 67 , 0 , 6536 , 192, 115), // #1098
+ INST(Vpblendvb        , VexRvmr_Lx         , V(660F3A,4C,_,x,0,_,_,_  ), 0                         , 67 , 0  , 6545 , 193, 128), // #1099
+ INST(Vpblendw , VexRvmi_Lx , V(660F3A,0E,_,x,I,_,_,_ ), 0 , 67 , 0 , 6555 , 192, 128), // #1100
+ INST(Vpbroadcastb , VexRm_Lx_Bcst , V(660F38,78,_,x,0,0,0,T1S), E(660F38,7A,_,x,0,0,0,T1S), 180, 105, 6564 , 308, 131), // #1101
+ INST(Vpbroadcastd , VexRm_Lx_Bcst , V(660F38,58,_,x,0,0,2,T1S), E(660F38,7C,_,x,0,0,0,T1S), 112, 106, 6577 , 309, 125), // #1102
+ INST(Vpbroadcastmb2q  , VexRm_Lx           , E(F30F38,2A,_,x,_,1,_,_  ), 0                         , 181, 0  , 6590 , 310, 132), // #1103
+ INST(Vpbroadcastmw2d  , VexRm_Lx           , E(F30F38,3A,_,x,_,0,_,_  ), 0                         , 119, 0  , 6606 , 310, 132), // #1104
+ INST(Vpbroadcastq , VexRm_Lx_Bcst , V(660F38,59,_,x,0,1,3,T1S), E(660F38,7C,_,x,0,1,0,T1S), 111, 107, 6622 , 311, 125), // #1105
+ INST(Vpbroadcastw , VexRm_Lx_Bcst , V(660F38,79,_,x,0,0,1,T1S), E(660F38,7B,_,x,0,0,0,T1S), 182, 108, 6635 , 312, 131), // #1106
+ INST(Vpclmulqdq , VexRvmi_Lx , V(660F3A,44,_,x,I,_,4,FVM), 0 , 178, 0 , 6648 , 313, 133), // #1107
+ INST(Vpcmov , VexRvrmRvmr_Lx , V(XOP_M8,A2,_,x,x,_,_,_ ), 0 , 183, 0 , 6659 , 250, 124), // #1108
+ INST(Vpcmpb , VexRvmi_Lx , E(660F3A,3F,_,x,_,0,4,FVM), 0 , 142, 0 , 6666 , 314, 113), // #1109
+ INST(Vpcmpd , VexRvmi_Lx , E(660F3A,1F,_,x,_,0,4,FV ), 0 , 99 , 0 , 6673 , 315, 111), // #1110
+ INST(Vpcmpeqb         , VexRvm_Lx          , V(660F00,74,_,x,I,I,4,FVM), 0                         , 175, 0  , 6680 , 316, 130), // #1111
+ INST(Vpcmpeqd         , VexRvm_Lx          , V(660F00,76,_,x,I,0,4,FV ), 0                         , 124, 0  , 6689 , 317, 116), // #1112
+ INST(Vpcmpeqq         , VexRvm_Lx          , V(660F38,29,_,x,I,1,4,FV ), 0                         , 184, 0  , 6698 , 318, 116), // #1113
+ INST(Vpcmpeqw         , VexRvm_Lx          , V(660F00,75,_,x,I,I,4,FVM), 0                         , 175, 0  , 6707 , 316, 130), // #1114
+ INST(Vpcmpestri , VexRmi , V(660F3A,61,_,0,I,_,_,_ ), 0 , 67 , 0 , 6716 , 319, 134), // #1115
+ INST(Vpcmpestrm , VexRmi , V(660F3A,60,_,0,I,_,_,_ ), 0 , 67 , 0 , 6727 , 320, 134), // #1116
+ INST(Vpcmpgtb         , VexRvm_Lx          , V(660F00,64,_,x,I,I,4,FVM), 0                         , 175, 0  , 6738 , 316, 130), // #1117
+ INST(Vpcmpgtd         , VexRvm_Lx          , V(660F00,66,_,x,I,0,4,FV ), 0                         , 124, 0  , 6747 , 317, 116), // #1118
+ INST(Vpcmpgtq         , VexRvm_Lx          , V(660F38,37,_,x,I,1,4,FV ), 0                         , 184, 0  , 6756 , 318, 116), // #1119
+ INST(Vpcmpgtw         , VexRvm_Lx          , V(660F00,65,_,x,I,I,4,FVM), 0                         , 175, 0  , 6765 , 316, 130), // #1120
+ INST(Vpcmpistri , VexRmi , V(660F3A,63,_,0,I,_,_,_ ), 0 , 67 , 0 , 6774 , 321, 134), // #1121
+ INST(Vpcmpistrm , VexRmi , V(660F3A,62,_,0,I,_,_,_ ), 0 , 67 , 0 , 6785 , 322, 134), // #1122
+ INST(Vpcmpq , VexRvmi_Lx , E(660F3A,1F,_,x,_,1,4,FV ), 0 , 100, 0 , 6796 , 323, 111), // #1123
+ INST(Vpcmpub , VexRvmi_Lx , E(660F3A,3E,_,x,_,0,4,FVM), 0 , 142, 0 , 6803 , 314, 113), // #1124
+ INST(Vpcmpud , VexRvmi_Lx , E(660F3A,1E,_,x,_,0,4,FV ), 0 , 99 , 0 , 6811 , 315, 111), // #1125
+ INST(Vpcmpuq , VexRvmi_Lx , E(660F3A,1E,_,x,_,1,4,FV ), 0 , 100, 0 , 6819 , 323, 111), // #1126
+ INST(Vpcmpuw , VexRvmi_Lx , E(660F3A,3E,_,x,_,1,4,FVM), 0 , 185, 0 , 6827 , 323, 113), // #1127
+ INST(Vpcmpw , VexRvmi_Lx , E(660F3A,3F,_,x,_,1,4,FVM), 0 , 185, 0 , 6835 , 323, 113), // #1128
+ INST(Vpcomb , VexRvmi , V(XOP_M8,CC,_,0,0,_,_,_ ), 0 , 183, 0 , 6842 , 238, 124), // #1129
+ INST(Vpcomd , VexRvmi , V(XOP_M8,CE,_,0,0,_,_,_ ), 0 , 183, 0 , 6849 , 238, 124), // #1130
+ INST(Vpcompressb , VexMr_Lx , E(660F38,63,_,x,_,0,0,T1S), 0 , 186, 0 , 6856 , 207, 135), // #1131
+ INST(Vpcompressd , VexMr_Lx , E(660F38,8B,_,x,_,0,2,T1S), 0 , 116, 0 , 6868 , 207, 111), // #1132
+ INST(Vpcompressq , VexMr_Lx , E(660F38,8B,_,x,_,1,3,T1S), 0 , 115, 0 , 6880 , 207, 111), // #1133
+ INST(Vpcompressw , VexMr_Lx , E(660F38,63,_,x,_,1,1,T1S), 0 , 187, 0 , 6892 , 207, 135), // #1134
+ INST(Vpcomq , VexRvmi , V(XOP_M8,CF,_,0,0,_,_,_ ), 0 , 183, 0 , 6904 , 238, 124), // #1135
+ INST(Vpcomub , VexRvmi , V(XOP_M8,EC,_,0,0,_,_,_ ), 0 , 183, 0 , 6911 , 238, 124), // #1136
+ INST(Vpcomud , VexRvmi , V(XOP_M8,EE,_,0,0,_,_,_ ), 0 , 183, 0 , 6919 , 238, 124), // #1137
+ INST(Vpcomuq , VexRvmi , V(XOP_M8,EF,_,0,0,_,_,_ ), 0 , 183, 0 , 6927 , 238, 124), // #1138
+ INST(Vpcomuw , VexRvmi , V(XOP_M8,ED,_,0,0,_,_,_ ), 0 , 183, 0 , 6935 , 238, 124), // #1139
+ INST(Vpcomw , VexRvmi , V(XOP_M8,CD,_,0,0,_,_,_ ), 0 , 183, 0 , 6943 , 238, 124), // #1140
+ INST(Vpconflictd , VexRm_Lx , E(660F38,C4,_,x,_,0,4,FV ), 0 , 102, 0 , 6950 , 324, 132), // #1141
+ INST(Vpconflictq , VexRm_Lx , E(660F38,C4,_,x,_,1,4,FV ), 0 , 103, 0 , 6962 , 324, 132), // #1142
+ INST(Vpdpbusd , VexRvm_Lx , E(660F38,50,_,x,_,0,4,FV ), 0 , 102, 0 , 6974 , 190, 136), // #1143
+ INST(Vpdpbusds , VexRvm_Lx , E(660F38,51,_,x,_,0,4,FV ), 0 , 102, 0 , 6983 , 190, 136), // #1144
+ INST(Vpdpwssd , VexRvm_Lx , E(660F38,52,_,x,_,0,4,FV ), 0 , 102, 0 , 6993 , 190, 136), // #1145
+ INST(Vpdpwssds , VexRvm_Lx , E(660F38,53,_,x,_,0,4,FV ), 0 , 102, 0 , 7002 , 190, 136), // #1146
+ INST(Vperm2f128 , VexRvmi , V(660F3A,06,_,1,0,_,_,_ ), 0 , 145, 0 , 7012 , 325, 108), // #1147
+ INST(Vperm2i128 , VexRvmi , V(660F3A,46,_,1,0,_,_,_ ), 0 , 145, 0 , 7023 , 325, 115), // #1148
+ INST(Vpermb , VexRvm_Lx , E(660F38,8D,_,x,_,0,4,FVM), 0 , 101, 0 , 7034 , 189, 137), // #1149
+ INST(Vpermd , VexRvm_Lx , V(660F38,36,_,x,0,0,4,FV ), 0 , 154, 0 , 7041 , 326, 125), // #1150
+ INST(Vpermi2b , VexRvm_Lx , E(660F38,75,_,x,_,0,4,FVM), 0 , 101, 0 , 7048 , 189, 137), // #1151
+ INST(Vpermi2d , VexRvm_Lx , E(660F38,76,_,x,_,0,4,FV ), 0 , 102, 0 , 7057 , 190, 111), // #1152
+ INST(Vpermi2pd , VexRvm_Lx , E(660F38,77,_,x,_,1,4,FV ), 0 , 103, 0 , 7066 , 191, 111), // #1153
+ INST(Vpermi2ps , VexRvm_Lx , E(660F38,77,_,x,_,0,4,FV ), 0 , 102, 0 , 7076 , 190, 111), // #1154
+ INST(Vpermi2q , VexRvm_Lx , E(660F38,76,_,x,_,1,4,FV ), 0 , 103, 0 , 7086 , 191, 111), // #1155
+ INST(Vpermi2w , VexRvm_Lx , E(660F38,75,_,x,_,1,4,FVM), 0 , 104, 0 , 7095 , 189, 113), // #1156
+ INST(Vpermil2pd , VexRvrmiRvmri_Lx , V(660F3A,49,_,x,x,_,_,_ ), 0 , 67 , 0 , 7104 , 327, 124), // #1157
+ INST(Vpermil2ps , VexRvrmiRvmri_Lx , V(660F3A,48,_,x,x,_,_,_ ), 0 , 67 , 0 , 7115 , 327, 124), // #1158
+ INST(Vpermilpd , VexRvmRmi_Lx , V(660F38,0D,_,x,0,1,4,FV ), V(660F3A,05,_,x,0,1,4,FV ), 188, 109, 7126 , 328, 106), // #1159
+ INST(Vpermilps , VexRvmRmi_Lx , V(660F38,0C,_,x,0,0,4,FV ), V(660F3A,04,_,x,0,0,4,FV ), 154, 110, 7136 , 328, 106), // #1160
+ INST(Vpermpd , VexRvmRmi_Lx , E(660F38,16,_,x,1,1,4,FV ), V(660F3A,01,_,x,1,1,4,FV ), 189, 111, 7146 , 329, 125), // #1161
+ INST(Vpermps , VexRvm_Lx , V(660F38,16,_,x,0,0,4,FV ), 0 , 154, 0 , 7154 , 326, 125), // #1162
+ INST(Vpermq , VexRvmRmi_Lx , V(660F38,36,_,x,_,1,4,FV ), V(660F3A,00,_,x,1,1,4,FV ), 188, 112, 7162 , 329, 125), // #1163
+ INST(Vpermt2b , VexRvm_Lx , E(660F38,7D,_,x,_,0,4,FVM), 0 , 101, 0 , 7169 , 189, 137), // #1164
+ INST(Vpermt2d , VexRvm_Lx , E(660F38,7E,_,x,_,0,4,FV ), 0 , 102, 0 , 7178 , 190, 111), // #1165
+ INST(Vpermt2pd , VexRvm_Lx , E(660F38,7F,_,x,_,1,4,FV ), 0 , 103, 0 , 7187 , 191, 111), // #1166
+ INST(Vpermt2ps , VexRvm_Lx , E(660F38,7F,_,x,_,0,4,FV ), 0 , 102, 0 , 7197 , 190, 111), // #1167
+ INST(Vpermt2q , VexRvm_Lx , E(660F38,7E,_,x,_,1,4,FV ), 0 , 103, 0 , 7207 , 191, 111), // #1168
+ INST(Vpermt2w , VexRvm_Lx , E(660F38,7D,_,x,_,1,4,FVM), 0 , 104, 0 , 7216 , 189, 113), // #1169
+ INST(Vpermw , VexRvm_Lx , E(660F38,8D,_,x,_,1,4,FVM), 0 , 104, 0 , 7225 , 189, 113), // #1170
+ INST(Vpexpandb , VexRm_Lx , E(660F38,62,_,x,_,0,0,T1S), 0 , 186, 0 , 7232 , 241, 135), // #1171
+ INST(Vpexpandd , VexRm_Lx , E(660F38,89,_,x,_,0,2,T1S), 0 , 116, 0 , 7242 , 241, 111), // #1172
+ INST(Vpexpandq , VexRm_Lx , E(660F38,89,_,x,_,1,3,T1S), 0 , 115, 0 , 7252 , 241, 111), // #1173
+ INST(Vpexpandw , VexRm_Lx , E(660F38,62,_,x,_,1,1,T1S), 0 , 187, 0 , 7262 , 241, 135), // #1174
+ INST(Vpextrb , VexMri , V(660F3A,14,_,0,0,I,0,T1S), 0 , 190, 0 , 7272 , 330, 138), // #1175
+ INST(Vpextrd , VexMri , V(660F3A,16,_,0,0,0,2,T1S), 0 , 150, 0 , 7280 , 245, 139), // #1176
+ INST(Vpextrq , VexMri , V(660F3A,16,_,0,1,1,3,T1S), 0 , 191, 0 , 7288 , 331, 139), // #1177
+ INST(Vpextrw , VexMri , V(660F3A,15,_,0,0,I,1,T1S), 0 , 192, 0 , 7296 , 332, 138), // #1178
+ INST(Vpgatherdd , VexRmvRm_VM , V(660F38,90,_,x,0,_,_,_ ), V(660F38,90,_,x,_,0,2,T1S), 88 , 113, 7304 , 261, 125), // #1179
+ INST(Vpgatherdq , VexRmvRm_VM , V(660F38,90,_,x,1,_,_,_ ), V(660F38,90,_,x,_,1,3,T1S), 156, 114, 7315 , 260, 125), // #1180
+ INST(Vpgatherqd , VexRmvRm_VM , V(660F38,91,_,x,0,_,_,_ ), V(660F38,91,_,x,_,0,2,T1S), 88 , 115, 7326 , 266, 125), // #1181
+ INST(Vpgatherqq , VexRmvRm_VM , V(660F38,91,_,x,1,_,_,_ ), V(660F38,91,_,x,_,1,3,T1S), 156, 116, 7337 , 265, 125), // #1182
+ INST(Vphaddbd , VexRm , V(XOP_M9,C2,_,0,0,_,_,_ ), 0 , 72 , 0 , 7348 , 181, 124), // #1183
+ INST(Vphaddbq , VexRm , V(XOP_M9,C3,_,0,0,_,_,_ ), 0 , 72 , 0 , 7357 , 181, 124), // #1184
+ INST(Vphaddbw , VexRm , V(XOP_M9,C1,_,0,0,_,_,_ ), 0 , 72 , 0 , 7366 , 181, 124), // #1185
+ INST(Vphaddd , VexRvm_Lx , V(660F38,02,_,x,I,_,_,_ ), 0 , 88 , 0 , 7375 , 179, 128), // #1186
+ INST(Vphadddq , VexRm , V(XOP_M9,CB,_,0,0,_,_,_ ), 0 , 72 , 0 , 7383 , 181, 124), // #1187
+ INST(Vphaddsw , VexRvm_Lx , V(660F38,03,_,x,I,_,_,_ ), 0 , 88 , 0 , 7392 , 179, 128), // #1188
+ INST(Vphaddubd , VexRm , V(XOP_M9,D2,_,0,0,_,_,_ ), 0 , 72 , 0 , 7401 , 181, 124), // #1189
+ INST(Vphaddubq , VexRm , V(XOP_M9,D3,_,0,0,_,_,_ ), 0 , 72 , 0 , 7411 , 181, 124), // #1190
+ INST(Vphaddubw , VexRm , V(XOP_M9,D1,_,0,0,_,_,_ ), 0 , 72 , 0 , 7421 , 181, 124), // #1191
+ INST(Vphaddudq , VexRm , V(XOP_M9,DB,_,0,0,_,_,_ ), 0 , 72 , 0 , 7431 , 181, 124), // #1192
+ INST(Vphadduwd , VexRm , V(XOP_M9,D6,_,0,0,_,_,_ ), 0 , 72 , 0 , 7441 , 181, 124), // #1193
+ INST(Vphadduwq , VexRm , V(XOP_M9,D7,_,0,0,_,_,_ ), 0 , 72 , 0 , 7451 , 181, 124), // #1194
+ INST(Vphaddw , VexRvm_Lx , V(660F38,01,_,x,I,_,_,_ ), 0 , 88 , 0 , 7461 , 179, 128), // #1195
+ INST(Vphaddwd , VexRm , V(XOP_M9,C6,_,0,0,_,_,_ ), 0 , 72 , 0 , 7469 , 181, 124), // #1196
+ INST(Vphaddwq , VexRm , V(XOP_M9,C7,_,0,0,_,_,_ ), 0 , 72 , 0 , 7478 , 181, 124), // #1197
+ INST(Vphminposuw , VexRm , V(660F38,41,_,0,I,_,_,_ ), 0 , 88 , 0 , 7487 , 181, 108), // #1198
+ INST(Vphsubbw , VexRm , V(XOP_M9,E1,_,0,0,_,_,_ ), 0 , 72 , 0 , 7499 , 181, 124), // #1199
+ INST(Vphsubd , VexRvm_Lx , V(660F38,06,_,x,I,_,_,_ ), 0 , 88 , 0 , 7508 , 179, 128), // #1200
+ INST(Vphsubdq , VexRm , V(XOP_M9,E3,_,0,0,_,_,_ ), 0 , 72 , 0 , 7516 , 181, 124), // #1201
+ INST(Vphsubsw , VexRvm_Lx , V(660F38,07,_,x,I,_,_,_ ), 0 , 88 , 0 , 7525 , 179, 128), // #1202
+ INST(Vphsubw , VexRvm_Lx , V(660F38,05,_,x,I,_,_,_ ), 0 , 88 , 0 , 7534 , 179, 128), // #1203
+ INST(Vphsubwd , VexRm , V(XOP_M9,E2,_,0,0,_,_,_ ), 0 , 72 , 0 , 7542 , 181, 124), // #1204
+ INST(Vpinsrb , VexRvmi , V(660F3A,20,_,0,0,I,0,T1S), 0 , 190, 0 , 7551 , 333, 138), // #1205
+ INST(Vpinsrd , VexRvmi , V(660F3A,22,_,0,0,0,2,T1S), 0 , 150, 0 , 7559 , 334, 139), // #1206
+ INST(Vpinsrq , VexRvmi , V(660F3A,22,_,0,1,1,3,T1S), 0 , 191, 0 , 7567 , 335, 139), // #1207
+ INST(Vpinsrw , VexRvmi , V(660F00,C4,_,0,0,I,1,T1S), 0 , 193, 0 , 7575 , 336, 138), // #1208
+ INST(Vplzcntd , VexRm_Lx , E(660F38,44,_,x,_,0,4,FV ), 0 , 102, 0 , 7583 , 324, 132), // #1209
+ INST(Vplzcntq , VexRm_Lx , E(660F38,44,_,x,_,1,4,FV ), 0 , 103, 0 , 7592 , 337, 132), // #1210
+ INST(Vpmacsdd , VexRvmr , V(XOP_M8,9E,_,0,0,_,_,_ ), 0 , 183, 0 , 7601 , 338, 124), // #1211
+ INST(Vpmacsdqh , VexRvmr , V(XOP_M8,9F,_,0,0,_,_,_ ), 0 , 183, 0 , 7610 , 338, 124), // #1212
+ INST(Vpmacsdql , VexRvmr , V(XOP_M8,97,_,0,0,_,_,_ ), 0 , 183, 0 , 7620 , 338, 124), // #1213
+ INST(Vpmacssdd , VexRvmr , V(XOP_M8,8E,_,0,0,_,_,_ ), 0 , 183, 0 , 7630 , 338, 124), // #1214
+ INST(Vpmacssdqh , VexRvmr , V(XOP_M8,8F,_,0,0,_,_,_ ), 0 , 183, 0 , 7640 , 338, 124), // #1215
+ INST(Vpmacssdql , VexRvmr , V(XOP_M8,87,_,0,0,_,_,_ ), 0 , 183, 0 , 7651 , 338, 124), // #1216
+ INST(Vpmacsswd , VexRvmr , V(XOP_M8,86,_,0,0,_,_,_ ), 0 , 183, 0 , 7662 , 338, 124), // #1217
+ INST(Vpmacssww , VexRvmr , V(XOP_M8,85,_,0,0,_,_,_ ), 0 , 183, 0 , 7672 , 338, 124), // #1218
+ INST(Vpmacswd , VexRvmr , V(XOP_M8,96,_,0,0,_,_,_ ), 0 , 183, 0 , 7682 , 338, 124), // #1219
+ INST(Vpmacsww , VexRvmr , V(XOP_M8,95,_,0,0,_,_,_ ), 0 , 183, 0 , 7691 , 338, 124), // #1220
+ INST(Vpmadcsswd , VexRvmr , V(XOP_M8,A6,_,0,0,_,_,_ ), 0 , 183, 0 , 7700 , 338, 124), // #1221
+ INST(Vpmadcswd , VexRvmr , V(XOP_M8,B6,_,0,0,_,_,_ ), 0 , 183, 0 , 7711 , 338, 124), // #1222
+ INST(Vpmadd52huq , VexRvm_Lx , E(660F38,B5,_,x,_,1,4,FV ), 0 , 103, 0 , 7721 , 191, 140), // #1223
+ INST(Vpmadd52luq , VexRvm_Lx , E(660F38,B4,_,x,_,1,4,FV ), 0 , 103, 0 , 7733 , 191, 140), // #1224
+ INST(Vpmaddubsw , VexRvm_Lx , V(660F38,04,_,x,I,I,4,FVM), 0 , 98 , 0 , 7745 , 272, 130), // #1225
+ INST(Vpmaddwd , VexRvm_Lx , V(660F00,F5,_,x,I,I,4,FVM), 0 , 175, 0 , 7756 , 272, 130), // #1226
+ INST(Vpmaskmovd , VexRvmMvr_Lx , V(660F38,8C,_,x,0,_,_,_ ), V(660F38,8E,_,x,0,_,_,_ ), 88 , 117, 7765 , 280, 115), // #1227
+ INST(Vpmaskmovq , VexRvmMvr_Lx , V(660F38,8C,_,x,1,_,_,_ ), V(660F38,8E,_,x,1,_,_,_ ), 156, 118, 7776 , 280, 115), // #1228
+ INST(Vpmaxsb , VexRvm_Lx , V(660F38,3C,_,x,I,I,4,FVM), 0 , 98 , 0 , 7787 , 339, 130), // #1229
+ INST(Vpmaxsd , VexRvm_Lx , V(660F38,3D,_,x,I,0,4,FV ), 0 , 154, 0 , 7795 , 188, 116), // #1230
+ INST(Vpmaxsq , VexRvm_Lx , E(660F38,3D,_,x,_,1,4,FV ), 0 , 103, 0 , 7803 , 191, 111), // #1231
+ INST(Vpmaxsw , VexRvm_Lx , V(660F00,EE,_,x,I,I,4,FVM), 0 , 175, 0 , 7811 , 339, 130), // #1232
+ INST(Vpmaxub , VexRvm_Lx , V(660F00,DE,_,x,I,I,4,FVM), 0 , 175, 0 , 7819 , 339, 130), // #1233
+ INST(Vpmaxud , VexRvm_Lx , V(660F38,3F,_,x,I,0,4,FV ), 0 , 154, 0 , 7827 , 188, 116), // #1234
+ INST(Vpmaxuq , VexRvm_Lx , E(660F38,3F,_,x,_,1,4,FV ), 0 , 103, 0 , 7835 , 191, 111), // #1235
+ INST(Vpmaxuw , VexRvm_Lx , V(660F38,3E,_,x,I,I,4,FVM), 0 , 98 , 0 , 7843 , 339, 130), // #1236
+ INST(Vpminsb , VexRvm_Lx , V(660F38,38,_,x,I,I,4,FVM), 0 , 98 , 0 , 7851 , 339, 130), // #1237
+ INST(Vpminsd , VexRvm_Lx , V(660F38,39,_,x,I,0,4,FV ), 0 , 154, 0 , 7859 , 188, 116), // #1238
+ INST(Vpminsq , VexRvm_Lx , E(660F38,39,_,x,_,1,4,FV ), 0 , 103, 0 , 7867 , 191, 111), // #1239
+ INST(Vpminsw , VexRvm_Lx , V(660F00,EA,_,x,I,I,4,FVM), 0 , 175, 0 , 7875 , 339, 130), // #1240
+ INST(Vpminub , VexRvm_Lx , V(660F00,DA,_,x,I,_,4,FVM), 0 , 175, 0 , 7883 , 339, 130), // #1241
+ INST(Vpminud , VexRvm_Lx , V(660F38,3B,_,x,I,0,4,FV ), 0 , 154, 0 , 7891 , 188, 116), // #1242
+ INST(Vpminuq , VexRvm_Lx , E(660F38,3B,_,x,_,1,4,FV ), 0 , 103, 0 , 7899 , 191, 111), // #1243
+ INST(Vpminuw , VexRvm_Lx , V(660F38,3A,_,x,I,_,4,FVM), 0 , 98 , 0 , 7907 , 339, 130), // #1244
+ INST(Vpmovb2m , VexRm_Lx , E(F30F38,29,_,x,_,0,_,_ ), 0 , 119, 0 , 7915 , 340, 113), // #1245
+ INST(Vpmovd2m , VexRm_Lx , E(F30F38,39,_,x,_,0,_,_ ), 0 , 119, 0 , 7924 , 340, 114), // #1246
+ INST(Vpmovdb , VexMr_Lx , E(F30F38,31,_,x,_,0,2,QVM), 0 , 194, 0 , 7933 , 341, 111), // #1247
+ INST(Vpmovdw , VexMr_Lx , E(F30F38,33,_,x,_,0,3,HVM), 0 , 195, 0 , 7941 , 342, 111), // #1248
+ INST(Vpmovm2b , VexRm_Lx , E(F30F38,28,_,x,_,0,_,_ ), 0 , 119, 0 , 7949 , 310, 113), // #1249
+ INST(Vpmovm2d , VexRm_Lx , E(F30F38,38,_,x,_,0,_,_ ), 0 , 119, 0 , 7958 , 310, 114), // #1250
+ INST(Vpmovm2q , VexRm_Lx , E(F30F38,38,_,x,_,1,_,_ ), 0 , 181, 0 , 7967 , 310, 114), // #1251
+ INST(Vpmovm2w , VexRm_Lx , E(F30F38,28,_,x,_,1,_,_ ), 0 , 181, 0 , 7976 , 310, 113), // #1252
+ INST(Vpmovmskb , VexRm_Lx , V(660F00,D7,_,x,I,_,_,_ ), 0 , 63 , 0 , 7985 , 293, 128), // #1253
+ INST(Vpmovq2m , VexRm_Lx , E(F30F38,39,_,x,_,1,_,_ ), 0 , 181, 0 , 7995 , 340, 114), // #1254
+ INST(Vpmovqb , VexMr_Lx , E(F30F38,32,_,x,_,0,1,OVM), 0 , 196, 0 , 8004 , 343, 111), // #1255
+ INST(Vpmovqd , VexMr_Lx , E(F30F38,35,_,x,_,0,3,HVM), 0 , 195, 0 , 8012 , 342, 111), // #1256
+ INST(Vpmovqw , VexMr_Lx , E(F30F38,34,_,x,_,0,2,QVM), 0 , 194, 0 , 8020 , 341, 111), // #1257
+ INST(Vpmovsdb , VexMr_Lx , E(F30F38,21,_,x,_,0,2,QVM), 0 , 194, 0 , 8028 , 341, 111), // #1258
+ INST(Vpmovsdw , VexMr_Lx , E(F30F38,23,_,x,_,0,3,HVM), 0 , 195, 0 , 8037 , 342, 111), // #1259
+ INST(Vpmovsqb , VexMr_Lx , E(F30F38,22,_,x,_,0,1,OVM), 0 , 196, 0 , 8046 , 343, 111), // #1260
+ INST(Vpmovsqd , VexMr_Lx , E(F30F38,25,_,x,_,0,3,HVM), 0 , 195, 0 , 8055 , 342, 111), // #1261
+ INST(Vpmovsqw , VexMr_Lx , E(F30F38,24,_,x,_,0,2,QVM), 0 , 194, 0 , 8064 , 341, 111), // #1262
+ INST(Vpmovswb , VexMr_Lx , E(F30F38,20,_,x,_,0,3,HVM), 0 , 195, 0 , 8073 , 342, 113), // #1263
+ INST(Vpmovsxbd , VexRm_Lx , V(660F38,21,_,x,I,I,2,QVM), 0 , 197, 0 , 8082 , 344, 116), // #1264
+ INST(Vpmovsxbq , VexRm_Lx , V(660F38,22,_,x,I,I,1,OVM), 0 , 198, 0 , 8092 , 345, 116), // #1265
+ INST(Vpmovsxbw , VexRm_Lx , V(660F38,20,_,x,I,I,3,HVM), 0 , 123, 0 , 8102 , 346, 130), // #1266
+ INST(Vpmovsxdq , VexRm_Lx , V(660F38,25,_,x,I,0,3,HVM), 0 , 123, 0 , 8112 , 346, 116), // #1267
+ INST(Vpmovsxwd , VexRm_Lx , V(660F38,23,_,x,I,I,3,HVM), 0 , 123, 0 , 8122 , 346, 116), // #1268
+ INST(Vpmovsxwq , VexRm_Lx , V(660F38,24,_,x,I,I,2,QVM), 0 , 197, 0 , 8132 , 344, 116), // #1269
+ INST(Vpmovusdb , VexMr_Lx , E(F30F38,11,_,x,_,0,2,QVM), 0 , 194, 0 , 8142 , 341, 111), // #1270
+ INST(Vpmovusdw , VexMr_Lx , E(F30F38,13,_,x,_,0,3,HVM), 0 , 195, 0 , 8152 , 342, 111), // #1271
+ INST(Vpmovusqb , VexMr_Lx , E(F30F38,12,_,x,_,0,1,OVM), 0 , 196, 0 , 8162 , 343, 111), // #1272
+ INST(Vpmovusqd , VexMr_Lx , E(F30F38,15,_,x,_,0,3,HVM), 0 , 195, 0 , 8172 , 342, 111), // #1273
+ INST(Vpmovusqw , VexMr_Lx , E(F30F38,14,_,x,_,0,2,QVM), 0 , 194, 0 , 8182 , 341, 111), // #1274
+ INST(Vpmovuswb , VexMr_Lx , E(F30F38,10,_,x,_,0,3,HVM), 0 , 195, 0 , 8192 , 342, 113), // #1275
+ INST(Vpmovw2m , VexRm_Lx , E(F30F38,29,_,x,_,1,_,_ ), 0 , 181, 0 , 8202 , 340, 113), // #1276
+ INST(Vpmovwb , VexMr_Lx , E(F30F38,30,_,x,_,0,3,HVM), 0 , 195, 0 , 8211 , 342, 113), // #1277
+ INST(Vpmovzxbd , VexRm_Lx , V(660F38,31,_,x,I,I,2,QVM), 0 , 197, 0 , 8219 , 344, 116), // #1278
+ INST(Vpmovzxbq , VexRm_Lx , V(660F38,32,_,x,I,I,1,OVM), 0 , 198, 0 , 8229 , 345, 116), // #1279
+ INST(Vpmovzxbw , VexRm_Lx , V(660F38,30,_,x,I,I,3,HVM), 0 , 123, 0 , 8239 , 346, 130), // #1280
+ INST(Vpmovzxdq , VexRm_Lx , V(660F38,35,_,x,I,0,3,HVM), 0 , 123, 0 , 8249 , 346, 116), // #1281
+ INST(Vpmovzxwd , VexRm_Lx , V(660F38,33,_,x,I,I,3,HVM), 0 , 123, 0 , 8259 , 346, 116), // #1282
+ INST(Vpmovzxwq , VexRm_Lx , V(660F38,34,_,x,I,I,2,QVM), 0 , 197, 0 , 8269 , 344, 116), // #1283
+ INST(Vpmuldq , VexRvm_Lx , V(660F38,28,_,x,I,1,4,FV ), 0 , 188, 0 , 8279 , 185, 116), // #1284
+ INST(Vpmulhrsw , VexRvm_Lx , V(660F38,0B,_,x,I,I,4,FVM), 0 , 98 , 0 , 8287 , 272, 130), // #1285
+ INST(Vpmulhuw , VexRvm_Lx , V(660F00,E4,_,x,I,I,4,FVM), 0 , 175, 0 , 8297 , 272, 130), // #1286
+ INST(Vpmulhw , VexRvm_Lx , V(660F00,E5,_,x,I,I,4,FVM), 0 , 175, 0 , 8306 , 272, 130), // #1287
+ INST(Vpmulld , VexRvm_Lx , V(660F38,40,_,x,I,0,4,FV ), 0 , 154, 0 , 8314 , 186, 116), // #1288
+ INST(Vpmullq , VexRvm_Lx , E(660F38,40,_,x,_,1,4,FV ), 0 , 103, 0 , 8322 , 191, 114), // #1289
+ INST(Vpmullw , VexRvm_Lx , V(660F00,D5,_,x,I,I,4,FVM), 0 , 175, 0 , 8330 , 272, 130), // #1290
+ INST(Vpmultishiftqb , VexRvm_Lx , E(660F38,83,_,x,_,1,4,FV ), 0 , 103, 0 , 8338 , 191, 137), // #1291
+ INST(Vpmuludq , VexRvm_Lx , V(660F00,F4,_,x,I,1,4,FV ), 0 , 93 , 0 , 8353 , 185, 116), // #1292
+ INST(Vpopcntb , VexRm_Lx , E(660F38,54,_,x,_,0,4,FV ), 0 , 102, 0 , 8362 , 241, 141), // #1293
+ INST(Vpopcntd , VexRm_Lx , E(660F38,55,_,x,_,0,4,FVM), 0 , 101, 0 , 8371 , 324, 142), // #1294
+ INST(Vpopcntq , VexRm_Lx , E(660F38,55,_,x,_,1,4,FVM), 0 , 104, 0 , 8380 , 337, 142), // #1295
+ INST(Vpopcntw , VexRm_Lx , E(660F38,54,_,x,_,1,4,FV ), 0 , 103, 0 , 8389 , 241, 141), // #1296
+ INST(Vpor , VexRvm_Lx , V(660F00,EB,_,x,I,_,_,_ ), 0 , 63 , 0 , 8398 , 302, 128), // #1297
+ INST(Vpord , VexRvm_Lx , E(660F00,EB,_,x,_,0,4,FV ), 0 , 179, 0 , 8403 , 303, 111), // #1298
+ INST(Vporq , VexRvm_Lx , E(660F00,EB,_,x,_,1,4,FV ), 0 , 121, 0 , 8409 , 307, 111), // #1299
+ INST(Vpperm , VexRvrmRvmr , V(XOP_M8,A3,_,0,x,_,_,_ ), 0 , 183, 0 , 8415 , 347, 124), // #1300
+ INST(Vprold , VexVmi_Lx , E(660F00,72,1,x,_,0,4,FV ), 0 , 199, 0 , 8422 , 348, 111), // #1301
+ INST(Vprolq , VexVmi_Lx , E(660F00,72,1,x,_,1,4,FV ), 0 , 200, 0 , 8429 , 349, 111), // #1302
+ INST(Vprolvd , VexRvm_Lx , E(660F38,15,_,x,_,0,4,FV ), 0 , 102, 0 , 8436 , 190, 111), // #1303
+ INST(Vprolvq , VexRvm_Lx , E(660F38,15,_,x,_,1,4,FV ), 0 , 103, 0 , 8444 , 191, 111), // #1304
+ INST(Vprord , VexVmi_Lx , E(660F00,72,0,x,_,0,4,FV ), 0 , 179, 0 , 8452 , 348, 111), // #1305
+ INST(Vprorq , VexVmi_Lx , E(660F00,72,0,x,_,1,4,FV ), 0 , 121, 0 , 8459 , 349, 111), // #1306
+ INST(Vprorvd , VexRvm_Lx , E(660F38,14,_,x,_,0,4,FV ), 0 , 102, 0 , 8466 , 190, 111), // #1307
+ INST(Vprorvq , VexRvm_Lx , E(660F38,14,_,x,_,1,4,FV ), 0 , 103, 0 , 8474 , 191, 111), // #1308
+ INST(Vprotb , VexRvmRmvRmi , V(XOP_M9,90,_,0,x,_,_,_ ), V(XOP_M8,C0,_,0,x,_,_,_ ), 72 , 119, 8482 , 350, 124), // #1309
+ INST(Vprotd , VexRvmRmvRmi , V(XOP_M9,92,_,0,x,_,_,_ ), V(XOP_M8,C2,_,0,x,_,_,_ ), 72 , 120, 8489 , 350, 124), // #1310
+ INST(Vprotq , VexRvmRmvRmi , V(XOP_M9,93,_,0,x,_,_,_ ), V(XOP_M8,C3,_,0,x,_,_,_ ), 72 , 121, 8496 , 350, 124), // #1311
+ INST(Vprotw , VexRvmRmvRmi , V(XOP_M9,91,_,0,x,_,_,_ ), V(XOP_M8,C1,_,0,x,_,_,_ ), 72 , 122, 8503 , 350, 124), // #1312
+ INST(Vpsadbw , VexRvm_Lx , V(660F00,F6,_,x,I,I,4,FVM), 0 , 175, 0 , 8510 , 180, 130), // #1313
+ INST(Vpscatterdd , VexMr_VM , E(660F38,A0,_,x,_,0,2,T1S), 0 , 116, 0 , 8518 , 351, 111), // #1314
+ INST(Vpscatterdq , VexMr_VM , E(660F38,A0,_,x,_,1,3,T1S), 0 , 115, 0 , 8530 , 351, 111), // #1315
+ INST(Vpscatterqd , VexMr_VM , E(660F38,A1,_,x,_,0,2,T1S), 0 , 116, 0 , 8542 , 352, 111), // #1316
+ INST(Vpscatterqq , VexMr_VM , E(660F38,A1,_,x,_,1,3,T1S), 0 , 115, 0 , 8554 , 353, 111), // #1317
+ INST(Vpshab , VexRvmRmv , V(XOP_M9,98,_,0,x,_,_,_ ), 0 , 72 , 0 , 8566 , 354, 124), // #1318
+ INST(Vpshad , VexRvmRmv , V(XOP_M9,9A,_,0,x,_,_,_ ), 0 , 72 , 0 , 8573 , 354, 124), // #1319
+ INST(Vpshaq , VexRvmRmv , V(XOP_M9,9B,_,0,x,_,_,_ ), 0 , 72 , 0 , 8580 , 354, 124), // #1320
+ INST(Vpshaw , VexRvmRmv , V(XOP_M9,99,_,0,x,_,_,_ ), 0 , 72 , 0 , 8587 , 354, 124), // #1321
+ INST(Vpshlb , VexRvmRmv , V(XOP_M9,94,_,0,x,_,_,_ ), 0 , 72 , 0 , 8594 , 354, 124), // #1322
+ INST(Vpshld , VexRvmRmv , V(XOP_M9,96,_,0,x,_,_,_ ), 0 , 72 , 0 , 8601 , 354, 124), // #1323
+ INST(Vpshldd , VexRvmi_Lx , E(660F3A,71,_,x,_,0,4,FV ), 0 , 99 , 0 , 8608 , 183, 135), // #1324
+ INST(Vpshldq , VexRvmi_Lx , E(660F3A,71,_,x,_,1,4,FV ), 0 , 100, 0 , 8616 , 184, 135), // #1325
+ INST(Vpshldvd , VexRvm_Lx , E(660F38,71,_,x,_,0,4,FV ), 0 , 102, 0 , 8624 , 190, 135), // #1326
+ INST(Vpshldvq , VexRvm_Lx , E(660F38,71,_,x,_,1,4,FV ), 0 , 103, 0 , 8633 , 191, 135), // #1327
+ INST(Vpshldvw , VexRvm_Lx , E(660F38,70,_,x,_,0,4,FVM), 0 , 101, 0 , 8642 , 189, 135), // #1328
+ INST(Vpshldw , VexRvmi_Lx , E(660F3A,70,_,x,_,0,4,FVM), 0 , 142, 0 , 8651 , 237, 135), // #1329
+ INST(Vpshlq , VexRvmRmv , V(XOP_M9,97,_,0,x,_,_,_ ), 0 , 72 , 0 , 8659 , 354, 124), // #1330
+ INST(Vpshlw , VexRvmRmv , V(XOP_M9,95,_,0,x,_,_,_ ), 0 , 72 , 0 , 8666 , 354, 124), // #1331
+ INST(Vpshrdd , VexRvmi_Lx , E(660F3A,73,_,x,_,0,4,FV ), 0 , 99 , 0 , 8673 , 183, 135), // #1332
+ INST(Vpshrdq , VexRvmi_Lx , E(660F3A,73,_,x,_,1,4,FV ), 0 , 100, 0 , 8681 , 184, 135), // #1333
+ INST(Vpshrdvd , VexRvm_Lx , E(660F38,73,_,x,_,0,4,FV ), 0 , 102, 0 , 8689 , 190, 135), // #1334
+ INST(Vpshrdvq , VexRvm_Lx , E(660F38,73,_,x,_,1,4,FV ), 0 , 103, 0 , 8698 , 191, 135), // #1335
+ INST(Vpshrdvw , VexRvm_Lx , E(660F38,72,_,x,_,0,4,FVM), 0 , 101, 0 , 8707 , 189, 135), // #1336
+ INST(Vpshrdw , VexRvmi_Lx , E(660F3A,72,_,x,_,0,4,FVM), 0 , 142, 0 , 8716 , 237, 135), // #1337
+ INST(Vpshufb , VexRvm_Lx , V(660F38,00,_,x,I,I,4,FVM), 0 , 98 , 0 , 8724 , 272, 130), // #1338
+ INST(Vpshufbitqmb , VexRvm_Lx , E(660F38,8F,_,x,0,0,4,FVM), 0 , 101, 0 , 8732 , 355, 141), // #1339
+ INST(Vpshufd , VexRmi_Lx , V(660F00,70,_,x,I,0,4,FV ), 0 , 124, 0 , 8745 , 356, 116), // #1340
+ INST(Vpshufhw , VexRmi_Lx , V(F30F00,70,_,x,I,I,4,FVM), 0 , 176, 0 , 8753 , 357, 130), // #1341
+ INST(Vpshuflw , VexRmi_Lx , V(F20F00,70,_,x,I,I,4,FVM), 0 , 201, 0 , 8762 , 357, 130), // #1342
+ INST(Vpsignb , VexRvm_Lx , V(660F38,08,_,x,I,_,_,_ ), 0 , 88 , 0 , 8771 , 179, 128), // #1343
+ INST(Vpsignd , VexRvm_Lx , V(660F38,0A,_,x,I,_,_,_ ), 0 , 88 , 0 , 8779 , 179, 128), // #1344
+ INST(Vpsignw , VexRvm_Lx , V(660F38,09,_,x,I,_,_,_ ), 0 , 88 , 0 , 8787 , 179, 128), // #1345
+ INST(Vpslld , VexRvmVmi_Lx , V(660F00,F2,_,x,I,0,4,128), V(660F00,72,6,x,I,0,4,FV ), 202, 123, 8795 , 358, 116), // #1346
+ INST(Vpslldq , VexEvexVmi_Lx , V(660F00,73,7,x,I,I,4,FVM), 0 , 203, 0 , 8802 , 359, 130), // #1347
+ INST(Vpsllq , VexRvmVmi_Lx , V(660F00,F3,_,x,I,1,4,128), V(660F00,73,6,x,I,1,4,FV ), 204, 124, 8810 , 360, 116), // #1348
+ INST(Vpsllvd , VexRvm_Lx , V(660F38,47,_,x,0,0,4,FV ), 0 , 154, 0 , 8817 , 186, 125), // #1349
+ INST(Vpsllvq , VexRvm_Lx , V(660F38,47,_,x,1,1,4,FV ), 0 , 153, 0 , 8825 , 185, 125), // #1350
+ INST(Vpsllvw , VexRvm_Lx , E(660F38,12,_,x,_,1,4,FVM), 0 , 104, 0 , 8833 , 189, 113), // #1351
+ INST(Vpsllw , VexRvmVmi_Lx , V(660F00,F1,_,x,I,I,4,FVM), V(660F00,71,6,x,I,I,4,FVM), 175, 125, 8841 , 361, 130), // #1352
+ INST(Vpsrad , VexRvmVmi_Lx , V(660F00,E2,_,x,I,0,4,128), V(660F00,72,4,x,I,0,4,FV ), 202, 126, 8848 , 358, 116), // #1353
+ INST(Vpsraq , VexRvmVmi_Lx , E(660F00,E2,_,x,_,1,4,128), E(660F00,72,4,x,_,1,4,FV ), 205, 127, 8855 , 362, 111), // #1354
+ INST(Vpsravd , VexRvm_Lx , V(660F38,46,_,x,0,0,4,FV ), 0 , 154, 0 , 8862 , 186, 125), // #1355
+ INST(Vpsravq , VexRvm_Lx , E(660F38,46,_,x,_,1,4,FV ), 0 , 103, 0 , 8870 , 191, 111), // #1356
+ INST(Vpsravw , VexRvm_Lx , E(660F38,11,_,x,_,1,4,FVM), 0 , 104, 0 , 8878 , 189, 113), // #1357
+ INST(Vpsraw , VexRvmVmi_Lx , V(660F00,E1,_,x,I,I,4,128), V(660F00,71,4,x,I,I,4,FVM), 202, 128, 8886 , 361, 130), // #1358
+ INST(Vpsrld , VexRvmVmi_Lx , V(660F00,D2,_,x,I,0,4,128), V(660F00,72,2,x,I,0,4,FV ), 202, 129, 8893 , 358, 116), // #1359
+ INST(Vpsrldq , VexEvexVmi_Lx , V(660F00,73,3,x,I,I,4,FVM), 0 , 206, 0 , 8900 , 359, 130), // #1360
+ INST(Vpsrlq , VexRvmVmi_Lx , V(660F00,D3,_,x,I,1,4,128), V(660F00,73,2,x,I,1,4,FV ), 204, 130, 8908 , 360, 116), // #1361
+ INST(Vpsrlvd , VexRvm_Lx , V(660F38,45,_,x,0,0,4,FV ), 0 , 154, 0 , 8915 , 186, 125), // #1362
+ INST(Vpsrlvq , VexRvm_Lx , V(660F38,45,_,x,1,1,4,FV ), 0 , 153, 0 , 8923 , 185, 125), // #1363
+ INST(Vpsrlvw , VexRvm_Lx , E(660F38,10,_,x,_,1,4,FVM), 0 , 104, 0 , 8931 , 189, 113), // #1364
+ INST(Vpsrlw , VexRvmVmi_Lx , V(660F00,D1,_,x,I,I,4,128), V(660F00,71,2,x,I,I,4,FVM), 202, 131, 8939 , 361, 130), // #1365
+ INST(Vpsubb , VexRvm_Lx , V(660F00,F8,_,x,I,I,4,FVM), 0 , 175, 0 , 8946 , 363, 130), // #1366
+ INST(Vpsubd , VexRvm_Lx , V(660F00,FA,_,x,I,0,4,FV ), 0 , 124, 0 , 8953 , 364, 116), // #1367
+ INST(Vpsubq , VexRvm_Lx , V(660F00,FB,_,x,I,1,4,FV ), 0 , 93 , 0 , 8960 , 365, 116), // #1368
+ INST(Vpsubsb , VexRvm_Lx , V(660F00,E8,_,x,I,I,4,FVM), 0 , 175, 0 , 8967 , 363, 130), // #1369
+ INST(Vpsubsw , VexRvm_Lx , V(660F00,E9,_,x,I,I,4,FVM), 0 , 175, 0 , 8975 , 363, 130), // #1370
+ INST(Vpsubusb , VexRvm_Lx , V(660F00,D8,_,x,I,I,4,FVM), 0 , 175, 0 , 8983 , 363, 130), // #1371
+ INST(Vpsubusw , VexRvm_Lx , V(660F00,D9,_,x,I,I,4,FVM), 0 , 175, 0 , 8992 , 363, 130), // #1372
+ INST(Vpsubw , VexRvm_Lx , V(660F00,F9,_,x,I,I,4,FVM), 0 , 175, 0 , 9001 , 363, 130), // #1373
+ INST(Vpternlogd , VexRvmi_Lx , E(660F3A,25,_,x,_,0,4,FV ), 0 , 99 , 0 , 9008 , 183, 111), // #1374
+ INST(Vpternlogq , VexRvmi_Lx , E(660F3A,25,_,x,_,1,4,FV ), 0 , 100, 0 , 9019 , 184, 111), // #1375
+ INST(Vptest , VexRm_Lx , V(660F38,17,_,x,I,_,_,_ ), 0 , 88 , 0 , 9030 , 257, 134), // #1376
+ INST(Vptestmb , VexRvm_Lx , E(660F38,26,_,x,_,0,4,FVM), 0 , 101, 0 , 9037 , 355, 113), // #1377
+ INST(Vptestmd , VexRvm_Lx , E(660F38,27,_,x,_,0,4,FV ), 0 , 102, 0 , 9046 , 366, 111), // #1378
+ INST(Vptestmq , VexRvm_Lx , E(660F38,27,_,x,_,1,4,FV ), 0 , 103, 0 , 9055 , 367, 111), // #1379
+ INST(Vptestmw , VexRvm_Lx , E(660F38,26,_,x,_,1,4,FVM), 0 , 104, 0 , 9064 , 355, 113), // #1380
+ INST(Vptestnmb , VexRvm_Lx , E(F30F38,26,_,x,_,0,4,FVM), 0 , 207, 0 , 9073 , 355, 113), // #1381
+ INST(Vptestnmd , VexRvm_Lx , E(F30F38,27,_,x,_,0,4,FV ), 0 , 208, 0 , 9083 , 366, 111), // #1382
+ INST(Vptestnmq , VexRvm_Lx , E(F30F38,27,_,x,_,1,4,FV ), 0 , 209, 0 , 9093 , 367, 111), // #1383
+ INST(Vptestnmw , VexRvm_Lx , E(F30F38,26,_,x,_,1,4,FVM), 0 , 210, 0 , 9103 , 355, 113), // #1384
+ INST(Vpunpckhbw , VexRvm_Lx , V(660F00,68,_,x,I,I,4,FVM), 0 , 175, 0 , 9113 , 272, 130), // #1385
+ INST(Vpunpckhdq , VexRvm_Lx , V(660F00,6A,_,x,I,0,4,FV ), 0 , 124, 0 , 9124 , 186, 116), // #1386
+ INST(Vpunpckhqdq , VexRvm_Lx , V(660F00,6D,_,x,I,1,4,FV ), 0 , 93 , 0 , 9135 , 185, 116), // #1387
+ INST(Vpunpckhwd , VexRvm_Lx , V(660F00,69,_,x,I,I,4,FVM), 0 , 175, 0 , 9147 , 272, 130), // #1388
+ INST(Vpunpcklbw , VexRvm_Lx , V(660F00,60,_,x,I,I,4,FVM), 0 , 175, 0 , 9158 , 272, 130), // #1389
+ INST(Vpunpckldq , VexRvm_Lx , V(660F00,62,_,x,I,0,4,FV ), 0 , 124, 0 , 9169 , 186, 116), // #1390
+ INST(Vpunpcklqdq , VexRvm_Lx , V(660F00,6C,_,x,I,1,4,FV ), 0 , 93 , 0 , 9180 , 185, 116), // #1391
+ INST(Vpunpcklwd , VexRvm_Lx , V(660F00,61,_,x,I,I,4,FVM), 0 , 175, 0 , 9192 , 272, 130), // #1392
+ INST(Vpxor , VexRvm_Lx , V(660F00,EF,_,x,I,_,_,_ ), 0 , 63 , 0 , 9203 , 304, 128), // #1393
+ INST(Vpxord , VexRvm_Lx , E(660F00,EF,_,x,_,0,4,FV ), 0 , 179, 0 , 9209 , 305, 111), // #1394
+ INST(Vpxorq , VexRvm_Lx , E(660F00,EF,_,x,_,1,4,FV ), 0 , 121, 0 , 9216 , 306, 111), // #1395
+ INST(Vrangepd , VexRvmi_Lx , E(660F3A,50,_,x,_,1,4,FV ), 0 , 100, 0 , 9223 , 246, 114), // #1396
+ INST(Vrangeps , VexRvmi_Lx , E(660F3A,50,_,x,_,0,4,FV ), 0 , 99 , 0 , 9232 , 247, 114), // #1397
+ INST(Vrangesd , VexRvmi , E(660F3A,51,_,I,_,1,3,T1S), 0 , 151, 0 , 9241 , 248, 61 ), // #1398
+ INST(Vrangess , VexRvmi , E(660F3A,51,_,I,_,0,2,T1S), 0 , 152, 0 , 9250 , 249, 61 ), // #1399
+ INST(Vrcp14pd , VexRm_Lx , E(660F38,4C,_,x,_,1,4,FV ), 0 , 103, 0 , 9259 , 337, 111), // #1400
+ INST(Vrcp14ps , VexRm_Lx , E(660F38,4C,_,x,_,0,4,FV ), 0 , 102, 0 , 9268 , 324, 111), // #1401
+ INST(Vrcp14sd , VexRvm , E(660F38,4D,_,I,_,1,3,T1S), 0 , 115, 0 , 9277 , 368, 63 ), // #1402
+ INST(Vrcp14ss , VexRvm , E(660F38,4D,_,I,_,0,2,T1S), 0 , 116, 0 , 9286 , 369, 63 ), // #1403
+ INST(Vrcp28pd , VexRm , E(660F38,CA,_,2,_,1,4,FV ), 0 , 143, 0 , 9295 , 239, 120), // #1404
+ INST(Vrcp28ps , VexRm , E(660F38,CA,_,2,_,0,4,FV ), 0 , 144, 0 , 9304 , 240, 120), // #1405
+ INST(Vrcp28sd , VexRvm , E(660F38,CB,_,I,_,1,3,T1S), 0 , 115, 0 , 9313 , 267, 120), // #1406
+ INST(Vrcp28ss , VexRvm , E(660F38,CB,_,I,_,0,2,T1S), 0 , 116, 0 , 9322 , 268, 120), // #1407
+ INST(Vrcpps , VexRm_Lx , V(000F00,53,_,x,I,_,_,_ ), 0 , 66 , 0 , 9331 , 257, 108), // #1408
+ INST(Vrcpss , VexRvm , V(F30F00,53,_,I,I,_,_,_ ), 0 , 169, 0 , 9338 , 370, 108), // #1409
+ INST(Vreducepd , VexRmi_Lx , E(660F3A,56,_,x,_,1,4,FV ), 0 , 100, 0 , 9345 , 349, 114), // #1410
+ INST(Vreduceps , VexRmi_Lx , E(660F3A,56,_,x,_,0,4,FV ), 0 , 99 , 0 , 9355 , 348, 114), // #1411
+ INST(Vreducesd , VexRvmi , E(660F3A,57,_,I,_,1,3,T1S), 0 , 151, 0 , 9365 , 371, 61 ), // #1412
+ INST(Vreducess , VexRvmi , E(660F3A,57,_,I,_,0,2,T1S), 0 , 152, 0 , 9375 , 372, 61 ), // #1413
+ INST(Vrndscalepd , VexRmi_Lx , E(660F3A,09,_,x,_,1,4,FV ), 0 , 100, 0 , 9385 , 269, 111), // #1414
+ INST(Vrndscaleps , VexRmi_Lx , E(660F3A,08,_,x,_,0,4,FV ), 0 , 99 , 0 , 9397 , 270, 111), // #1415
+ INST(Vrndscalesd , VexRvmi , E(660F3A,0B,_,I,_,1,3,T1S), 0 , 151, 0 , 9409 , 248, 63 ), // #1416
+ INST(Vrndscaless , VexRvmi , E(660F3A,0A,_,I,_,0,2,T1S), 0 , 152, 0 , 9421 , 249, 63 ), // #1417
+ INST(Vroundpd , VexRmi_Lx , V(660F3A,09,_,x,I,_,_,_ ), 0 , 67 , 0 , 9433 , 373, 108), // #1418
+ INST(Vroundps , VexRmi_Lx , V(660F3A,08,_,x,I,_,_,_ ), 0 , 67 , 0 , 9442 , 373, 108), // #1419
+ INST(Vroundsd , VexRvmi , V(660F3A,0B,_,I,I,_,_,_ ), 0 , 67 , 0 , 9451 , 374, 108), // #1420
+ INST(Vroundss , VexRvmi , V(660F3A,0A,_,I,I,_,_,_ ), 0 , 67 , 0 , 9460 , 375, 108), // #1421
+ INST(Vrsqrt14pd , VexRm_Lx , E(660F38,4E,_,x,_,1,4,FV ), 0 , 103, 0 , 9469 , 337, 111), // #1422
+ INST(Vrsqrt14ps , VexRm_Lx , E(660F38,4E,_,x,_,0,4,FV ), 0 , 102, 0 , 9480 , 324, 111), // #1423
+ INST(Vrsqrt14sd , VexRvm , E(660F38,4F,_,I,_,1,3,T1S), 0 , 115, 0 , 9491 , 368, 63 ), // #1424
+ INST(Vrsqrt14ss , VexRvm , E(660F38,4F,_,I,_,0,2,T1S), 0 , 116, 0 , 9502 , 369, 63 ), // #1425
+ INST(Vrsqrt28pd , VexRm , E(660F38,CC,_,2,_,1,4,FV ), 0 , 143, 0 , 9513 , 239, 120), // #1426
+ INST(Vrsqrt28ps , VexRm , E(660F38,CC,_,2,_,0,4,FV ), 0 , 144, 0 , 9524 , 240, 120), // #1427
+ INST(Vrsqrt28sd , VexRvm , E(660F38,CD,_,I,_,1,3,T1S), 0 , 115, 0 , 9535 , 267, 120), // #1428
+ INST(Vrsqrt28ss , VexRvm , E(660F38,CD,_,I,_,0,2,T1S), 0 , 116, 0 , 9546 , 268, 120), // #1429
+ INST(Vrsqrtps , VexRm_Lx , V(000F00,52,_,x,I,_,_,_ ), 0 , 66 , 0 , 9557 , 257, 108), // #1430
+ INST(Vrsqrtss , VexRvm , V(F30F00,52,_,I,I,_,_,_ ), 0 , 169, 0 , 9566 , 370, 108), // #1431
+ INST(Vscalefpd , VexRvm_Lx , E(660F38,2C,_,x,_,1,4,FV ), 0 , 103, 0 , 9575 , 376, 111), // #1432
+ INST(Vscalefps , VexRvm_Lx , E(660F38,2C,_,x,_,0,4,FV ), 0 , 102, 0 , 9585 , 377, 111), // #1433
+ INST(Vscalefsd , VexRvm , E(660F38,2D,_,I,_,1,3,T1S), 0 , 115, 0 , 9595 , 378, 63 ), // #1434
+ INST(Vscalefss , VexRvm , E(660F38,2D,_,I,_,0,2,T1S), 0 , 116, 0 , 9605 , 379, 63 ), // #1435
+ INST(Vscatterdpd , VexMr_Lx , E(660F38,A2,_,x,_,1,3,T1S), 0 , 115, 0 , 9615 , 380, 111), // #1436
+ INST(Vscatterdps , VexMr_Lx , E(660F38,A2,_,x,_,0,2,T1S), 0 , 116, 0 , 9627 , 351, 111), // #1437
+ INST(Vscatterpf0dpd , VexM_VM , E(660F38,C6,5,2,_,1,3,T1S), 0 , 211, 0 , 9639 , 262, 126), // #1438
+ INST(Vscatterpf0dps , VexM_VM , E(660F38,C6,5,2,_,0,2,T1S), 0 , 212, 0 , 9654 , 263, 126), // #1439
+ INST(Vscatterpf0qpd , VexM_VM , E(660F38,C7,5,2,_,1,3,T1S), 0 , 211, 0 , 9669 , 264, 126), // #1440
+ INST(Vscatterpf0qps , VexM_VM , E(660F38,C7,5,2,_,0,2,T1S), 0 , 212, 0 , 9684 , 264, 126), // #1441
+ INST(Vscatterpf1dpd , VexM_VM , E(660F38,C6,6,2,_,1,3,T1S), 0 , 213, 0 , 9699 , 262, 126), // #1442
+ INST(Vscatterpf1dps , VexM_VM , E(660F38,C6,6,2,_,0,2,T1S), 0 , 214, 0 , 9714 , 263, 126), // #1443
+ INST(Vscatterpf1qpd , VexM_VM , E(660F38,C7,6,2,_,1,3,T1S), 0 , 213, 0 , 9729 , 264, 126), // #1444
+ INST(Vscatterpf1qps , VexM_VM , E(660F38,C7,6,2,_,0,2,T1S), 0 , 214, 0 , 9744 , 264, 126), // #1445
+ INST(Vscatterqpd , VexMr_Lx , E(660F38,A3,_,x,_,1,3,T1S), 0 , 115, 0 , 9759 , 353, 111), // #1446
+ INST(Vscatterqps , VexMr_Lx , E(660F38,A3,_,x,_,0,2,T1S), 0 , 116, 0 , 9771 , 352, 111), // #1447
+ INST(Vshuff32x4 , VexRvmi_Lx , E(660F3A,23,_,x,_,0,4,FV ), 0 , 99 , 0 , 9783 , 381, 111), // #1448
+ INST(Vshuff64x2 , VexRvmi_Lx , E(660F3A,23,_,x,_,1,4,FV ), 0 , 100, 0 , 9794 , 382, 111), // #1449
+ INST(Vshufi32x4 , VexRvmi_Lx , E(660F3A,43,_,x,_,0,4,FV ), 0 , 99 , 0 , 9805 , 381, 111), // #1450
+ INST(Vshufi64x2 , VexRvmi_Lx , E(660F3A,43,_,x,_,1,4,FV ), 0 , 100, 0 , 9816 , 382, 111), // #1451
+ INST(Vshufpd , VexRvmi_Lx , V(660F00,C6,_,x,I,1,4,FV ), 0 , 93 , 0 , 9827 , 383, 106), // #1452
+ INST(Vshufps , VexRvmi_Lx , V(000F00,C6,_,x,I,0,4,FV ), 0 , 94 , 0 , 9835 , 384, 106), // #1453
+ INST(Vsqrtpd , VexRm_Lx , V(660F00,51,_,x,I,1,4,FV ), 0 , 93 , 0 , 9843 , 385, 106), // #1454
+ INST(Vsqrtps , VexRm_Lx , V(000F00,51,_,x,I,0,4,FV ), 0 , 94 , 0 , 9851 , 209, 106), // #1455
+ INST(Vsqrtsd , VexRvm , V(F20F00,51,_,I,I,1,3,T1S), 0 , 95 , 0 , 9859 , 177, 107), // #1456
+ INST(Vsqrtss , VexRvm , V(F30F00,51,_,I,I,0,2,T1S), 0 , 96 , 0 , 9867 , 178, 107), // #1457
+ INST(Vstmxcsr , VexM , V(000F00,AE,3,0,I,_,_,_ ), 0 , 215, 0 , 9875 , 278, 108), // #1458
+ INST(Vsubpd , VexRvm_Lx , V(660F00,5C,_,x,I,1,4,FV ), 0 , 93 , 0 , 9884 , 175, 106), // #1459
+ INST(Vsubps , VexRvm_Lx , V(000F00,5C,_,x,I,0,4,FV ), 0 , 94 , 0 , 9891 , 176, 106), // #1460
+ INST(Vsubsd , VexRvm , V(F20F00,5C,_,I,I,1,3,T1S), 0 , 95 , 0 , 9898 , 177, 107), // #1461
+ INST(Vsubss , VexRvm , V(F30F00,5C,_,I,I,0,2,T1S), 0 , 96 , 0 , 9905 , 178, 107), // #1462
+ INST(Vtestpd , VexRm_Lx , V(660F38,0F,_,x,0,_,_,_ ), 0 , 88 , 0 , 9912 , 257, 134), // #1463
+ INST(Vtestps , VexRm_Lx , V(660F38,0E,_,x,0,_,_,_ ), 0 , 88 , 0 , 9920 , 257, 134), // #1464
+ INST(Vucomisd , VexRm , V(660F00,2E,_,I,I,1,3,T1S), 0 , 113, 0 , 9928 , 205, 117), // #1465
+ INST(Vucomiss , VexRm , V(000F00,2E,_,I,I,0,2,T1S), 0 , 114, 0 , 9937 , 206, 117), // #1466
+ INST(Vunpckhpd , VexRvm_Lx , V(660F00,15,_,x,I,1,4,FV ), 0 , 93 , 0 , 9946 , 185, 106), // #1467
+ INST(Vunpckhps , VexRvm_Lx , V(000F00,15,_,x,I,0,4,FV ), 0 , 94 , 0 , 9956 , 186, 106), // #1468
+ INST(Vunpcklpd , VexRvm_Lx , V(660F00,14,_,x,I,1,4,FV ), 0 , 93 , 0 , 9966 , 185, 106), // #1469
+ INST(Vunpcklps , VexRvm_Lx , V(000F00,14,_,x,I,0,4,FV ), 0 , 94 , 0 , 9976 , 186, 106), // #1470
+ INST(Vxorpd , VexRvm_Lx , V(660F00,57,_,x,I,1,4,FV ), 0 , 93 , 0 , 9986 , 365, 112), // #1471
+ INST(Vxorps , VexRvm_Lx , V(000F00,57,_,x,I,0,4,FV ), 0 , 94 , 0 , 9993 , 364, 112), // #1472
+ INST(Vzeroall , VexOp , V(000F00,77,_,1,I,_,_,_ ), 0 , 62 , 0 , 10000, 386, 108), // #1473
+ INST(Vzeroupper , VexOp , V(000F00,77,_,0,I,_,_,_ ), 0 , 66 , 0 , 10009, 386, 108), // #1474
+ INST(Wbinvd , X86Op , O(000F00,09,_,_,_,_,_,_ ), 0 , 4 , 0 , 10020, 30 , 0 ), // #1475
+ INST(Wbnoinvd , X86Op , O(F30F00,09,_,_,_,_,_,_ ), 0 , 6 , 0 , 10027, 30 , 143), // #1476
+ INST(Wrfsbase , X86M , O(F30F00,AE,2,_,x,_,_,_ ), 0 , 216, 0 , 10036, 161, 94 ), // #1477
+ INST(Wrgsbase , X86M , O(F30F00,AE,3,_,x,_,_,_ ), 0 , 217, 0 , 10045, 161, 94 ), // #1478
+ INST(Wrmsr , X86Op , O(000F00,30,_,_,_,_,_,_ ), 0 , 4 , 0 , 10054, 162, 95 ), // #1479
+ INST(Xabort , X86Op_O_I8 , O(000000,C6,7,_,_,_,_,_ ), 0 , 25 , 0 , 10060, 74 , 144), // #1480
+ INST(Xadd , X86Xadd , O(000F00,C0,_,_,x,_,_,_ ), 0 , 4 , 0 , 10067, 387, 36 ), // #1481
+ INST(Xbegin , X86JmpRel , O(000000,C7,7,_,_,_,_,_ ), 0 , 25 , 0 , 10072, 388, 144), // #1482
+ INST(Xchg , X86Xchg , O(000000,86,_,_,x,_,_,_ ), 0 , 0 , 0 , 448 , 389, 0 ), // #1483
+ INST(Xend , X86Op , O(000F01,D5,_,_,_,_,_,_ ), 0 , 21 , 0 , 10079, 30 , 144), // #1484
+ INST(Xgetbv , X86Op , O(000F01,D0,_,_,_,_,_,_ ), 0 , 21 , 0 , 10084, 162, 145), // #1485
+ INST(Xlatb , X86Op , O(000000,D7,_,_,_,_,_,_ ), 0 , 0 , 0 , 10091, 30 , 0 ), // #1486
+ INST(Xor , X86Arith , O(000000,30,6,_,x,_,_,_ ), 0 , 30 , 0 , 9205 , 166, 1 ), // #1487
+ INST(Xorpd , ExtRm , O(660F00,57,_,_,_,_,_,_ ), 0 , 3 , 0 , 9987 , 140, 4 ), // #1488
+ INST(Xorps , ExtRm , O(000F00,57,_,_,_,_,_,_ ), 0 , 4 , 0 , 9994 , 140, 5 ), // #1489
+ INST(Xrstor , X86M_Only , O(000F00,AE,5,_,_,_,_,_ ), 0 , 70 , 0 , 1134 , 390, 145), // #1490
+ INST(Xrstor64 , X86M_Only , O(000F00,AE,5,_,1,_,_,_ ), 0 , 218, 0 , 1142 , 391, 145), // #1491
+ INST(Xrstors , X86M_Only , O(000F00,C7,3,_,_,_,_,_ ), 0 , 71 , 0 , 10097, 390, 146), // #1492
+ INST(Xrstors64 , X86M_Only , O(000F00,C7,3,_,1,_,_,_ ), 0 , 219, 0 , 10105, 391, 146), // #1493
+ INST(Xsave , X86M_Only , O(000F00,AE,4,_,_,_,_,_ ), 0 , 89 , 0 , 1152 , 390, 145), // #1494
+ INST(Xsave64 , X86M_Only , O(000F00,AE,4,_,1,_,_,_ ), 0 , 220, 0 , 1159 , 391, 145), // #1495
+ INST(Xsavec , X86M_Only , O(000F00,C7,4,_,_,_,_,_ ), 0 , 89 , 0 , 10115, 390, 147), // #1496
+ INST(Xsavec64 , X86M_Only , O(000F00,C7,4,_,1,_,_,_ ), 0 , 220, 0 , 10122, 391, 147), // #1497
+ INST(Xsaveopt , X86M_Only , O(000F00,AE,6,_,_,_,_,_ ), 0 , 73 , 0 , 10131, 390, 148), // #1498
+ INST(Xsaveopt64 , X86M_Only , O(000F00,AE,6,_,1,_,_,_ ), 0 , 221, 0 , 10140, 391, 148), // #1499
+ INST(Xsaves , X86M_Only , O(000F00,C7,5,_,_,_,_,_ ), 0 , 70 , 0 , 10151, 390, 146), // #1500
+ INST(Xsaves64 , X86M_Only , O(000F00,C7,5,_,1,_,_,_ ), 0 , 218, 0 , 10158, 391, 146), // #1501
+ INST(Xsetbv , X86Op , O(000F01,D1,_,_,_,_,_,_ ), 0 , 21 , 0 , 10167, 162, 145), // #1502
+ INST(Xtest , X86Op , O(000F01,D6,_,_,_,_,_,_ ), 0 , 21 , 0 , 10174, 30 , 149) // #1503
+ // ${InstInfo:End}
+};
+#undef NAME_DATA_INDEX
+#undef INST
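+
+// Note (illustrative, not part of the generated output): each INST row above
+// packs, in order, the instruction id, its encoding group, the primary and
+// alternative opcode templates, indices into the main/alt opcode tables that
+// follow, an offset into the shared name-string table, and indices into the
+// common-info and execution-info tables. This column order is inferred from
+// the data (e.g. the fifth column of each row matches a `#n` entry in
+// _mainOpcodeTable below); the authoritative field names live in the private
+// instruction-database header.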
+
+// ============================================================================
+// [asmjit::x86::InstDB - Opcode Tables]
+// ============================================================================
+
+// ${MainOpcodeTable:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+const uint32_t InstDB::_mainOpcodeTable[] = {
+ O(000000,00,0,0,0,0,0,_ ), // #0 [ref=55x]
+ O(000000,00,2,0,0,0,0,_ ), // #1 [ref=4x]
+ O(660F38,00,0,0,0,0,0,_ ), // #2 [ref=42x]
+ O(660F00,00,0,0,0,0,0,_ ), // #3 [ref=38x]
+ O(000F00,00,0,0,0,0,0,_ ), // #4 [ref=231x]
+ O(F20F00,00,0,0,0,0,0,_ ), // #5 [ref=24x]
+ O(F30F00,00,0,0,0,0,0,_ ), // #6 [ref=29x]
+ O(F30F38,00,0,0,0,0,0,_ ), // #7 [ref=2x]
+ O(660F3A,00,0,0,0,0,0,_ ), // #8 [ref=22x]
+ O(000000,00,4,0,0,0,0,_ ), // #9 [ref=5x]
+ V(000F38,00,0,0,0,0,0,_ ), // #10 [ref=3x]
+ V(XOP_M9,00,1,0,0,0,0,_ ), // #11 [ref=3x]
+ V(XOP_M9,00,6,0,0,0,0,_ ), // #12 [ref=2x]
+ V(XOP_M9,00,5,0,0,0,0,_ ), // #13 [ref=1x]
+ V(XOP_M9,00,3,0,0,0,0,_ ), // #14 [ref=1x]
+ V(XOP_M9,00,2,0,0,0,0,_ ), // #15 [ref=1x]
+ V(000F38,00,3,0,0,0,0,_ ), // #16 [ref=1x]
+ V(000F38,00,2,0,0,0,0,_ ), // #17 [ref=1x]
+ V(000F38,00,1,0,0,0,0,_ ), // #18 [ref=1x]
+ O(660000,00,0,0,0,0,0,_ ), // #19 [ref=7x]
+ O(000000,00,0,0,1,0,0,_ ), // #20 [ref=4x]
+ O(000F01,00,0,0,0,0,0,_ ), // #21 [ref=25x]
+ O(000F00,00,7,0,0,0,0,_ ), // #22 [ref=5x]
+ O(660F00,00,7,0,0,0,0,_ ), // #23 [ref=2x]
+ O(660F00,00,6,0,0,0,0,_ ), // #24 [ref=2x]
+ O(000000,00,7,0,0,0,0,_ ), // #25 [ref=5x]
+ O(000F00,00,1,0,1,0,0,_ ), // #26 [ref=2x]
+ O(000F00,00,1,0,0,0,0,_ ), // #27 [ref=6x]
+ O(F20F38,00,0,0,0,0,0,_ ), // #28 [ref=2x]
+ O(000000,00,1,0,0,0,0,_ ), // #29 [ref=3x]
+ O(000000,00,6,0,0,0,0,_ ), // #30 [ref=3x]
+ O_FPU(00,D900,_) , // #31 [ref=29x]
+ O_FPU(00,C000,0) , // #32 [ref=1x]
+ O_FPU(00,DE00,_) , // #33 [ref=7x]
+ O_FPU(00,0000,4) , // #34 [ref=4x]
+ O_FPU(00,0000,6) , // #35 [ref=4x]
+ O_FPU(9B,DB00,_) , // #36 [ref=2x]
+ O_FPU(00,DA00,_) , // #37 [ref=5x]
+ O_FPU(00,DB00,_) , // #38 [ref=8x]
+ O_FPU(00,D000,2) , // #39 [ref=1x]
+ O_FPU(00,DF00,_) , // #40 [ref=2x]
+ O_FPU(00,D800,3) , // #41 [ref=1x]
+ O_FPU(00,F000,6) , // #42 [ref=1x]
+ O_FPU(00,F800,7) , // #43 [ref=1x]
+ O_FPU(00,DD00,_) , // #44 [ref=3x]
+ O_FPU(00,0000,0) , // #45 [ref=3x]
+ O_FPU(00,0000,2) , // #46 [ref=3x]
+ O_FPU(00,0000,3) , // #47 [ref=3x]
+ O_FPU(00,0000,7) , // #48 [ref=3x]
+ O_FPU(00,0000,1) , // #49 [ref=2x]
+ O_FPU(00,0000,5) , // #50 [ref=2x]
+ O_FPU(00,C800,1) , // #51 [ref=1x]
+ O_FPU(9B,0000,6) , // #52 [ref=2x]
+ O_FPU(9B,0000,7) , // #53 [ref=2x]
+ O_FPU(00,E000,4) , // #54 [ref=1x]
+ O_FPU(00,E800,5) , // #55 [ref=1x]
+ O_FPU(00,0000,_) , // #56 [ref=1x]
+ O(000F00,00,0,0,1,0,0,_ ), // #57 [ref=1x]
+ O(000000,00,5,0,0,0,0,_ ), // #58 [ref=3x]
+ V(660F00,00,0,1,0,0,0,_ ), // #59 [ref=7x]
+ V(660F00,00,0,1,1,0,0,_ ), // #60 [ref=6x]
+ V(000F00,00,0,1,1,0,0,_ ), // #61 [ref=7x]
+ V(000F00,00,0,1,0,0,0,_ ), // #62 [ref=8x]
+ V(660F00,00,0,0,0,0,0,_ ), // #63 [ref=15x]
+ V(660F00,00,0,0,1,0,0,_ ), // #64 [ref=4x]
+ V(000F00,00,0,0,1,0,0,_ ), // #65 [ref=4x]
+ V(000F00,00,0,0,0,0,0,_ ), // #66 [ref=10x]
+ V(660F3A,00,0,0,0,0,0,_ ), // #67 [ref=45x]
+ V(660F3A,00,0,0,1,0,0,_ ), // #68 [ref=4x]
+ O(000F00,00,2,0,0,0,0,_ ), // #69 [ref=5x]
+ O(000F00,00,5,0,0,0,0,_ ), // #70 [ref=4x]
+ O(000F00,00,3,0,0,0,0,_ ), // #71 [ref=5x]
+ V(XOP_M9,00,0,0,0,0,0,_ ), // #72 [ref=32x]
+ O(000F00,00,6,0,0,0,0,_ ), // #73 [ref=5x]
+ V(XOP_MA,00,0,0,0,0,0,_ ), // #74 [ref=1x]
+ V(XOP_MA,00,1,0,0,0,0,_ ), // #75 [ref=1x]
+ O(000F38,00,0,0,0,0,0,_ ), // #76 [ref=23x]
+ V(F20F38,00,0,0,0,0,0,_ ), // #77 [ref=3x]
+ O(000000,00,3,0,0,0,0,_ ), // #78 [ref=3x]
+ O(000F3A,00,0,0,0,0,0,_ ), // #79 [ref=4x]
+ O(F30000,00,0,0,0,0,0,_ ), // #80 [ref=1x]
+ O(000F0F,00,0,0,0,0,0,_ ), // #81 [ref=26x]
+ V(F30F38,00,0,0,0,0,0,_ ), // #82 [ref=2x]
+ O(000F3A,00,0,0,1,0,0,_ ), // #83 [ref=1x]
+ O(660F3A,00,0,0,1,0,0,_ ), // #84 [ref=1x]
+ O(F30F00,00,1,0,0,0,0,_ ), // #85 [ref=1x]
+ O(F30F00,00,7,0,0,0,0,_ ), // #86 [ref=1x]
+ V(F20F3A,00,0,0,0,0,0,_ ), // #87 [ref=1x]
+ V(660F38,00,0,0,0,0,0,_ ), // #88 [ref=22x]
+ O(000F00,00,4,0,0,0,0,_ ), // #89 [ref=4x]
+ V(XOP_M9,00,7,0,0,0,0,_ ), // #90 [ref=1x]
+ V(XOP_M9,00,4,0,0,0,0,_ ), // #91 [ref=1x]
+ E(F20F38,00,0,2,0,0,2,T4X), // #92 [ref=6x]
+ V(660F00,00,0,0,0,1,4,FV ), // #93 [ref=22x]
+ V(000F00,00,0,0,0,0,4,FV ), // #94 [ref=16x]
+ V(F20F00,00,0,0,0,1,3,T1S), // #95 [ref=10x]
+ V(F30F00,00,0,0,0,0,2,T1S), // #96 [ref=10x]
+ V(F20F00,00,0,0,0,0,0,_ ), // #97 [ref=4x]
+ V(660F38,00,0,0,0,0,4,FVM), // #98 [ref=14x]
+ E(660F3A,00,0,0,0,0,4,FV ), // #99 [ref=14x]
+ E(660F3A,00,0,0,0,1,4,FV ), // #100 [ref=14x]
+ E(660F38,00,0,0,0,0,4,FVM), // #101 [ref=9x]
+ E(660F38,00,0,0,0,0,4,FV ), // #102 [ref=22x]
+ E(660F38,00,0,0,0,1,4,FV ), // #103 [ref=28x]
+ E(660F38,00,0,0,0,1,4,FVM), // #104 [ref=9x]
+ V(660F38,00,0,1,0,0,0,_ ), // #105 [ref=2x]
+ E(660F38,00,0,0,0,0,3,T2 ), // #106 [ref=2x]
+ E(660F38,00,0,0,0,0,4,T4 ), // #107 [ref=2x]
+ E(660F38,00,0,2,0,0,5,T8 ), // #108 [ref=2x]
+ E(660F38,00,0,0,0,1,4,T2 ), // #109 [ref=2x]
+ E(660F38,00,0,2,0,1,5,T4 ), // #110 [ref=2x]
+ V(660F38,00,0,0,0,1,3,T1S), // #111 [ref=2x]
+ V(660F38,00,0,0,0,0,2,T1S), // #112 [ref=14x]
+ V(660F00,00,0,0,0,1,3,T1S), // #113 [ref=5x]
+ V(000F00,00,0,0,0,0,2,T1S), // #114 [ref=2x]
+ E(660F38,00,0,0,0,1,3,T1S), // #115 [ref=14x]
+ E(660F38,00,0,0,0,0,2,T1S), // #116 [ref=14x]
+ V(F30F00,00,0,0,0,0,3,HV ), // #117 [ref=1x]
+ E(F20F38,00,0,0,0,0,0,_ ), // #118 [ref=1x]
+ E(F30F38,00,0,0,0,0,0,_ ), // #119 [ref=7x]
+ V(F20F00,00,0,0,0,1,4,FV ), // #120 [ref=1x]
+ E(660F00,00,0,0,0,1,4,FV ), // #121 [ref=9x]
+ E(000F00,00,0,0,0,1,4,FV ), // #122 [ref=3x]
+ V(660F38,00,0,0,0,0,3,HVM), // #123 [ref=7x]
+ V(660F00,00,0,0,0,0,4,FV ), // #124 [ref=11x]
+ V(000F00,00,0,0,0,0,4,HV ), // #125 [ref=1x]
+ V(660F3A,00,0,0,0,0,3,HVM), // #126 [ref=1x]
+ E(660F00,00,0,0,0,0,3,HV ), // #127 [ref=4x]
+ E(000F00,00,0,0,0,0,4,FV ), // #128 [ref=2x]
+ E(F30F00,00,0,0,0,1,4,FV ), // #129 [ref=2x]
+ V(F20F00,00,0,0,0,0,3,T1F), // #130 [ref=2x]
+ E(F20F00,00,0,0,0,0,3,T1F), // #131 [ref=2x]
+ V(F20F00,00,0,0,0,0,2,T1W), // #132 [ref=1x]
+ V(F30F00,00,0,0,0,0,2,T1W), // #133 [ref=1x]
+ V(F30F00,00,0,0,0,0,2,T1F), // #134 [ref=2x]
+ E(F30F00,00,0,0,0,0,2,T1F), // #135 [ref=2x]
+ V(F30F00,00,0,0,0,0,4,FV ), // #136 [ref=1x]
+ E(F30F00,00,0,0,0,0,3,HV ), // #137 [ref=1x]
+ E(F20F00,00,0,0,0,0,4,FV ), // #138 [ref=1x]
+ E(F20F00,00,0,0,0,1,4,FV ), // #139 [ref=1x]
+ E(F20F00,00,0,0,0,0,2,T1W), // #140 [ref=1x]
+ E(F30F00,00,0,0,0,0,2,T1W), // #141 [ref=1x]
+ E(660F3A,00,0,0,0,0,4,FVM), // #142 [ref=5x]
+ E(660F38,00,0,2,0,1,4,FV ), // #143 [ref=3x]
+ E(660F38,00,0,2,0,0,4,FV ), // #144 [ref=3x]
+ V(660F3A,00,0,1,0,0,0,_ ), // #145 [ref=6x]
+ E(660F3A,00,0,0,0,0,4,T4 ), // #146 [ref=4x]
+ E(660F3A,00,0,2,0,0,5,T8 ), // #147 [ref=4x]
+ E(660F3A,00,0,0,0,1,4,T2 ), // #148 [ref=4x]
+ E(660F3A,00,0,2,0,1,5,T4 ), // #149 [ref=4x]
+ V(660F3A,00,0,0,0,0,2,T1S), // #150 [ref=4x]
+ E(660F3A,00,0,0,0,1,3,T1S), // #151 [ref=6x]
+ E(660F3A,00,0,0,0,0,2,T1S), // #152 [ref=6x]
+ V(660F38,00,0,0,1,1,4,FV ), // #153 [ref=20x]
+ V(660F38,00,0,0,0,0,4,FV ), // #154 [ref=32x]
+ V(660F38,00,0,0,1,1,3,T1S), // #155 [ref=12x]
+ V(660F38,00,0,0,1,0,0,_ ), // #156 [ref=5x]
+ E(660F38,00,1,2,0,1,3,T1S), // #157 [ref=2x]
+ E(660F38,00,1,2,0,0,2,T1S), // #158 [ref=2x]
+ E(660F38,00,2,2,0,1,3,T1S), // #159 [ref=2x]
+ E(660F38,00,2,2,0,0,2,T1S), // #160 [ref=2x]
+ V(660F3A,00,0,0,1,1,4,FV ), // #161 [ref=2x]
+ V(000F00,00,2,0,0,0,0,_ ), // #162 [ref=1x]
+ V(660F00,00,0,0,0,1,4,FVM), // #163 [ref=3x]
+ V(000F00,00,0,0,0,0,4,FVM), // #164 [ref=3x]
+ V(660F00,00,0,0,0,0,2,T1S), // #165 [ref=1x]
+ V(F20F00,00,0,0,0,1,3,DUP), // #166 [ref=1x]
+ E(660F00,00,0,0,0,0,4,FVM), // #167 [ref=1x]
+ E(660F00,00,0,0,0,1,4,FVM), // #168 [ref=1x]
+ V(F30F00,00,0,0,0,0,0,_ ), // #169 [ref=3x]
+ E(F20F00,00,0,0,0,1,4,FVM), // #170 [ref=1x]
+ E(F30F00,00,0,0,0,0,4,FVM), // #171 [ref=1x]
+ E(F30F00,00,0,0,0,1,4,FVM), // #172 [ref=1x]
+ E(F20F00,00,0,0,0,0,4,FVM), // #173 [ref=1x]
+ V(000F00,00,0,0,0,0,3,T2 ), // #174 [ref=2x]
+ V(660F00,00,0,0,0,0,4,FVM), // #175 [ref=33x]
+ V(F30F00,00,0,0,0,0,4,FVM), // #176 [ref=3x]
+ O(F30F00,00,6,0,0,0,0,_ ), // #177 [ref=1x]
+ V(660F3A,00,0,0,0,0,4,FVM), // #178 [ref=2x]
+ E(660F00,00,0,0,0,0,4,FV ), // #179 [ref=5x]
+ V(660F38,00,0,0,0,0,0,T1S), // #180 [ref=1x]
+ E(F30F38,00,0,0,0,1,0,_ ), // #181 [ref=5x]
+ V(660F38,00,0,0,0,0,1,T1S), // #182 [ref=1x]
+ V(XOP_M8,00,0,0,0,0,0,_ ), // #183 [ref=22x]
+ V(660F38,00,0,0,0,1,4,FVM), // #184 [ref=2x]
+ E(660F3A,00,0,0,0,1,4,FVM), // #185 [ref=2x]
+ E(660F38,00,0,0,0,0,0,T1S), // #186 [ref=2x]
+ E(660F38,00,0,0,0,1,1,T1S), // #187 [ref=2x]
+ V(660F38,00,0,0,0,1,4,FV ), // #188 [ref=3x]
+ E(660F38,00,0,0,1,1,4,FV ), // #189 [ref=1x]
+ V(660F3A,00,0,0,0,0,0,T1S), // #190 [ref=2x]
+ V(660F3A,00,0,0,1,1,3,T1S), // #191 [ref=2x]
+ V(660F3A,00,0,0,0,0,1,T1S), // #192 [ref=1x]
+ V(660F00,00,0,0,0,0,1,T1S), // #193 [ref=1x]
+ E(F30F38,00,0,0,0,0,2,QVM), // #194 [ref=6x]
+ E(F30F38,00,0,0,0,0,3,HVM), // #195 [ref=9x]
+ E(F30F38,00,0,0,0,0,1,OVM), // #196 [ref=3x]
+ V(660F38,00,0,0,0,0,2,QVM), // #197 [ref=4x]
+ V(660F38,00,0,0,0,0,1,OVM), // #198 [ref=2x]
+ E(660F00,00,1,0,0,0,4,FV ), // #199 [ref=1x]
+ E(660F00,00,1,0,0,1,4,FV ), // #200 [ref=1x]
+ V(F20F00,00,0,0,0,0,4,FVM), // #201 [ref=1x]
+ V(660F00,00,0,0,0,0,4,128), // #202 [ref=5x]
+ V(660F00,00,7,0,0,0,4,FVM), // #203 [ref=1x]
+ V(660F00,00,0,0,0,1,4,128), // #204 [ref=2x]
+ E(660F00,00,0,0,0,1,4,128), // #205 [ref=1x]
+ V(660F00,00,3,0,0,0,4,FVM), // #206 [ref=1x]
+ E(F30F38,00,0,0,0,0,4,FVM), // #207 [ref=1x]
+ E(F30F38,00,0,0,0,0,4,FV ), // #208 [ref=1x]
+ E(F30F38,00,0,0,0,1,4,FV ), // #209 [ref=1x]
+ E(F30F38,00,0,0,0,1,4,FVM), // #210 [ref=1x]
+ E(660F38,00,5,2,0,1,3,T1S), // #211 [ref=2x]
+ E(660F38,00,5,2,0,0,2,T1S), // #212 [ref=2x]
+ E(660F38,00,6,2,0,1,3,T1S), // #213 [ref=2x]
+ E(660F38,00,6,2,0,0,2,T1S), // #214 [ref=2x]
+ V(000F00,00,3,0,0,0,0,_ ), // #215 [ref=1x]
+ O(F30F00,00,2,0,0,0,0,_ ), // #216 [ref=1x]
+ O(F30F00,00,3,0,0,0,0,_ ), // #217 [ref=1x]
+ O(000F00,00,5,0,1,0,0,_ ), // #218 [ref=2x]
+ O(000F00,00,3,0,1,0,0,_ ), // #219 [ref=1x]
+ O(000F00,00,4,0,1,0,0,_ ), // #220 [ref=2x]
+ O(000F00,00,6,0,1,0,0,_ ) // #221 [ref=1x]
+};
+// ----------------------------------------------------------------------------
+// ${MainOpcodeTable:End}
+
+// ${AltOpcodeTable:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+const uint32_t InstDB::_altOpcodeTable[] = {
+ 0 , // #0 [ref=1359x]
+ O(660F00,1B,_,_,_,_,_,_ ), // #1 [ref=1x]
+ O(000F00,BA,4,_,x,_,_,_ ), // #2 [ref=1x]
+ O(000F00,BA,7,_,x,_,_,_ ), // #3 [ref=1x]
+ O(000F00,BA,6,_,x,_,_,_ ), // #4 [ref=1x]
+ O(000F00,BA,5,_,x,_,_,_ ), // #5 [ref=1x]
+ O(000000,48,_,_,x,_,_,_ ), // #6 [ref=1x]
+ O(660F00,78,0,_,_,_,_,_ ), // #7 [ref=1x]
+ O_FPU(00,00DF,5) , // #8 [ref=1x]
+ O_FPU(00,00DF,7) , // #9 [ref=1x]
+ O_FPU(00,00DD,1) , // #10 [ref=1x]
+ O_FPU(00,00DB,5) , // #11 [ref=1x]
+ O_FPU(00,DFE0,_) , // #12 [ref=1x]
+ O(000000,DB,7,_,_,_,_,_ ), // #13 [ref=1x]
+ O_FPU(9B,DFE0,_) , // #14 [ref=1x]
+ O(000000,E4,_,_,_,_,_,_ ), // #15 [ref=1x]
+ O(000000,40,_,_,x,_,_,_ ), // #16 [ref=1x]
+ O(F20F00,78,_,_,_,_,_,_ ), // #17 [ref=1x]
+ O(000000,77,_,_,_,_,_,_ ), // #18 [ref=2x]
+ O(000000,73,_,_,_,_,_,_ ), // #19 [ref=3x]
+ O(000000,72,_,_,_,_,_,_ ), // #20 [ref=3x]
+ O(000000,76,_,_,_,_,_,_ ), // #21 [ref=2x]
+ O(000000,74,_,_,_,_,_,_ ), // #22 [ref=2x]
+ O(000000,E3,_,_,_,_,_,_ ), // #23 [ref=1x]
+ O(000000,7F,_,_,_,_,_,_ ), // #24 [ref=2x]
+ O(000000,7D,_,_,_,_,_,_ ), // #25 [ref=2x]
+ O(000000,7C,_,_,_,_,_,_ ), // #26 [ref=2x]
+ O(000000,7E,_,_,_,_,_,_ ), // #27 [ref=2x]
+ O(000000,EB,_,_,_,_,_,_ ), // #28 [ref=1x]
+ O(000000,75,_,_,_,_,_,_ ), // #29 [ref=2x]
+ O(000000,71,_,_,_,_,_,_ ), // #30 [ref=1x]
+ O(000000,7B,_,_,_,_,_,_ ), // #31 [ref=2x]
+ O(000000,79,_,_,_,_,_,_ ), // #32 [ref=1x]
+ O(000000,70,_,_,_,_,_,_ ), // #33 [ref=1x]
+ O(000000,7A,_,_,_,_,_,_ ), // #34 [ref=2x]
+ O(000000,78,_,_,_,_,_,_ ), // #35 [ref=1x]
+ V(660F00,92,_,0,0,_,_,_ ), // #36 [ref=1x]
+ V(F20F00,92,_,0,0,_,_,_ ), // #37 [ref=1x]
+ V(F20F00,92,_,0,1,_,_,_ ), // #38 [ref=1x]
+ V(000F00,92,_,0,0,_,_,_ ), // #39 [ref=1x]
+ O(000000,E2,_,_,_,_,_,_ ), // #40 [ref=1x]
+ O(000000,E1,_,_,_,_,_,_ ), // #41 [ref=1x]
+ O(000000,E0,_,_,_,_,_,_ ), // #42 [ref=1x]
+ O(660F00,29,_,_,_,_,_,_ ), // #43 [ref=1x]
+ O(000F00,29,_,_,_,_,_,_ ), // #44 [ref=1x]
+ O(000F38,F1,_,_,x,_,_,_ ), // #45 [ref=1x]
+ O(000F00,7E,_,_,_,_,_,_ ), // #46 [ref=1x]
+ O(660F00,7F,_,_,_,_,_,_ ), // #47 [ref=1x]
+ O(F30F00,7F,_,_,_,_,_,_ ), // #48 [ref=1x]
+ O(660F00,17,_,_,_,_,_,_ ), // #49 [ref=1x]
+ O(000F00,17,_,_,_,_,_,_ ), // #50 [ref=1x]
+ O(660F00,13,_,_,_,_,_,_ ), // #51 [ref=1x]
+ O(000F00,13,_,_,_,_,_,_ ), // #52 [ref=1x]
+ O(660F00,E7,_,_,_,_,_,_ ), // #53 [ref=1x]
+ O(660F00,2B,_,_,_,_,_,_ ), // #54 [ref=1x]
+ O(000F00,2B,_,_,_,_,_,_ ), // #55 [ref=1x]
+ O(000F00,E7,_,_,_,_,_,_ ), // #56 [ref=1x]
+ O(F20F00,2B,_,_,_,_,_,_ ), // #57 [ref=1x]
+ O(F30F00,2B,_,_,_,_,_,_ ), // #58 [ref=1x]
+ O(000F00,7E,_,_,x,_,_,_ ), // #59 [ref=1x]
+ O(F20F00,11,_,_,_,_,_,_ ), // #60 [ref=1x]
+ O(F30F00,11,_,_,_,_,_,_ ), // #61 [ref=1x]
+ O(660F00,11,_,_,_,_,_,_ ), // #62 [ref=1x]
+ O(000F00,11,_,_,_,_,_,_ ), // #63 [ref=1x]
+ O(000000,E6,_,_,_,_,_,_ ), // #64 [ref=1x]
+ O(000F3A,15,_,_,_,_,_,_ ), // #65 [ref=1x]
+ O(000000,58,_,_,_,_,_,_ ), // #66 [ref=1x]
+ O(000F00,72,6,_,_,_,_,_ ), // #67 [ref=1x]
+ O(660F00,73,7,_,_,_,_,_ ), // #68 [ref=1x]
+ O(000F00,73,6,_,_,_,_,_ ), // #69 [ref=1x]
+ O(000F00,71,6,_,_,_,_,_ ), // #70 [ref=1x]
+ O(000F00,72,4,_,_,_,_,_ ), // #71 [ref=1x]
+ O(000F00,71,4,_,_,_,_,_ ), // #72 [ref=1x]
+ O(000F00,72,2,_,_,_,_,_ ), // #73 [ref=1x]
+ O(660F00,73,3,_,_,_,_,_ ), // #74 [ref=1x]
+ O(000F00,73,2,_,_,_,_,_ ), // #75 [ref=1x]
+ O(000F00,71,2,_,_,_,_,_ ), // #76 [ref=1x]
+ O(000000,50,_,_,_,_,_,_ ), // #77 [ref=1x]
+ O(000000,F6,_,_,x,_,_,_ ), // #78 [ref=1x]
+ V(660F38,92,_,x,_,1,3,T1S), // #79 [ref=1x]
+ V(660F38,92,_,x,_,0,2,T1S), // #80 [ref=1x]
+ V(660F38,93,_,x,_,1,3,T1S), // #81 [ref=1x]
+ V(660F38,93,_,x,_,0,2,T1S), // #82 [ref=1x]
+ V(660F38,2F,_,x,0,_,_,_ ), // #83 [ref=1x]
+ V(660F38,2E,_,x,0,_,_,_ ), // #84 [ref=1x]
+ V(660F00,29,_,x,I,1,4,FVM), // #85 [ref=1x]
+ V(000F00,29,_,x,I,0,4,FVM), // #86 [ref=1x]
+ V(660F00,7E,_,0,0,0,2,T1S), // #87 [ref=1x]
+ V(660F00,7F,_,x,I,_,_,_ ), // #88 [ref=1x]
+ E(660F00,7F,_,x,_,0,4,FVM), // #89 [ref=1x]
+ E(660F00,7F,_,x,_,1,4,FVM), // #90 [ref=1x]
+ V(F30F00,7F,_,x,I,_,_,_ ), // #91 [ref=1x]
+ E(F20F00,7F,_,x,_,1,4,FVM), // #92 [ref=1x]
+ E(F30F00,7F,_,x,_,0,4,FVM), // #93 [ref=1x]
+ E(F30F00,7F,_,x,_,1,4,FVM), // #94 [ref=1x]
+ E(F20F00,7F,_,x,_,0,4,FVM), // #95 [ref=1x]
+ V(660F00,17,_,0,I,1,3,T1S), // #96 [ref=1x]
+ V(000F00,17,_,0,I,0,3,T2 ), // #97 [ref=1x]
+ V(660F00,13,_,0,I,1,3,T1S), // #98 [ref=1x]
+ V(000F00,13,_,0,I,0,3,T2 ), // #99 [ref=1x]
+ V(660F00,7E,_,0,I,1,3,T1S), // #100 [ref=1x]
+ V(F20F00,11,_,I,I,1,3,T1S), // #101 [ref=1x]
+ V(F30F00,11,_,I,I,0,2,T1S), // #102 [ref=1x]
+ V(660F00,11,_,x,I,1,4,FVM), // #103 [ref=1x]
+ V(000F00,11,_,x,I,0,4,FVM), // #104 [ref=1x]
+ E(660F38,7A,_,x,0,0,0,T1S), // #105 [ref=1x]
+ E(660F38,7C,_,x,0,0,0,T1S), // #106 [ref=1x]
+ E(660F38,7C,_,x,0,1,0,T1S), // #107 [ref=1x]
+ E(660F38,7B,_,x,0,0,0,T1S), // #108 [ref=1x]
+ V(660F3A,05,_,x,0,1,4,FV ), // #109 [ref=1x]
+ V(660F3A,04,_,x,0,0,4,FV ), // #110 [ref=1x]
+ V(660F3A,01,_,x,1,1,4,FV ), // #111 [ref=1x]
+ V(660F3A,00,_,x,1,1,4,FV ), // #112 [ref=1x]
+ V(660F38,90,_,x,_,0,2,T1S), // #113 [ref=1x]
+ V(660F38,90,_,x,_,1,3,T1S), // #114 [ref=1x]
+ V(660F38,91,_,x,_,0,2,T1S), // #115 [ref=1x]
+ V(660F38,91,_,x,_,1,3,T1S), // #116 [ref=1x]
+ V(660F38,8E,_,x,0,_,_,_ ), // #117 [ref=1x]
+ V(660F38,8E,_,x,1,_,_,_ ), // #118 [ref=1x]
+ V(XOP_M8,C0,_,0,x,_,_,_ ), // #119 [ref=1x]
+ V(XOP_M8,C2,_,0,x,_,_,_ ), // #120 [ref=1x]
+ V(XOP_M8,C3,_,0,x,_,_,_ ), // #121 [ref=1x]
+ V(XOP_M8,C1,_,0,x,_,_,_ ), // #122 [ref=1x]
+ V(660F00,72,6,x,I,0,4,FV ), // #123 [ref=1x]
+ V(660F00,73,6,x,I,1,4,FV ), // #124 [ref=1x]
+ V(660F00,71,6,x,I,I,4,FVM), // #125 [ref=1x]
+ V(660F00,72,4,x,I,0,4,FV ), // #126 [ref=1x]
+ E(660F00,72,4,x,_,1,4,FV ), // #127 [ref=1x]
+ V(660F00,71,4,x,I,I,4,FVM), // #128 [ref=1x]
+ V(660F00,72,2,x,I,0,4,FV ), // #129 [ref=1x]
+ V(660F00,73,2,x,I,1,4,FV ), // #130 [ref=1x]
+ V(660F00,71,2,x,I,I,4,FVM) // #131 [ref=1x]
+};
+// ----------------------------------------------------------------------------
+// ${AltOpcodeTable:End}
+
+#undef O_FPU
+#undef O
+#undef V
+#undef E
+
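+// Illustrative sketch (not part of the generated file): the instruction and
+// opcode tables above are consulted internally when instructions are encoded
+// through the public emitter API. A minimal, self-contained usage example,
+// assuming the asmjit API of this vintage (JitRuntime / CodeHolder /
+// x86::Assembler) and an AVX-capable host for the vpxor row defined above:
+//
+//   #include <asmjit/asmjit.h>
+//   using namespace asmjit;
+//
+//   typedef void (*Func)(void);
+//
+//   int main() {
+//     JitRuntime rt;                      // Owns the executable memory.
+//     CodeHolder code;
+//     code.init(rt.codeInfo());           // Match the host's code layout.
+//
+//     x86::Assembler a(&code);
+//     a.vpxor(x86::xmm0, x86::xmm0, x86::xmm0); // Encoded via the Vpxor row.
+//     a.ret();
+//
+//     Func fn;
+//     if (rt.add(&fn, &code) != kErrorOk) // Relocate and make executable.
+//       return 1;
+//     fn();
+//     rt.release(fn);
+//     return 0;
+//   }
+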
+// ============================================================================
+// [asmjit::x86::InstDB - CommonInfoTableA]
+// ============================================================================
+
+// ${InstCommonTable:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+#define F(VAL) InstDB::kFlag##VAL
+#define CONTROL(VAL) Inst::kControl##VAL
+#define SINGLE_REG(VAL) InstDB::kSingleReg##VAL
+const InstDB::CommonInfo InstDB::_commonInfoTable[] = {
+ { 0 , 0 , 0 , CONTROL(None) , SINGLE_REG(None), 0 }, // #0 [ref=1x]
+ { 0 , 339, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #1 [ref=4x]
+ { 0 , 340, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #2 [ref=2x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 16 , 12, CONTROL(None) , SINGLE_REG(None), 0 }, // #3 [ref=2x]
+ { 0 , 151, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #4 [ref=2x]
+ { F(Vec) , 70 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #5 [ref=54x]
+ { F(Vec) , 97 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #6 [ref=19x]
+ { F(Vec) , 222, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #7 [ref=16x]
+ { F(Vec) , 183, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #8 [ref=20x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 28 , 11, CONTROL(None) , SINGLE_REG(RO) , 0 }, // #9 [ref=1x]
+ { F(Vex) , 237, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #10 [ref=3x]
+ { F(Vec) , 70 , 1 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #11 [ref=12x]
+ { 0 , 341, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #12 [ref=1x]
+ { F(Vex) , 239, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #13 [ref=5x]
+ { F(Vex) , 151, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #14 [ref=12x]
+ { F(Vec) , 342, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #15 [ref=4x]
+ { 0 , 241, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #16 [ref=3x]
+ { F(Mib) , 343, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #17 [ref=1x]
+ { 0 , 344, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #18 [ref=1x]
+ { 0 , 243, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #19 [ref=1x]
+ { F(Mib) , 345, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #20 [ref=1x]
+ { 0 , 245, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #21 [ref=1x]
+ { 0 , 150, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #22 [ref=35x]
+ { 0 , 346, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #23 [ref=3x]
+ { 0 , 114, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #24 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 114, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #25 [ref=3x]
+ { F(Rep)|F(RepIgnored) , 247, 2 , CONTROL(Call) , SINGLE_REG(None), 0 }, // #26 [ref=1x]
+ { 0 , 347, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #27 [ref=1x]
+ { 0 , 348, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #28 [ref=2x]
+ { 0 , 322, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #29 [ref=1x]
+ { 0 , 257, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #30 [ref=74x]
+ { 0 , 349, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #31 [ref=24x]
+ { 0 , 350, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #32 [ref=1x]
+ { 0 , 16 , 12, CONTROL(None) , SINGLE_REG(None), 0 }, // #33 [ref=1x]
+ { F(Rep) , 351, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #34 [ref=1x]
+ { F(Vec) , 352, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #35 [ref=2x]
+ { F(Vec) , 353, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #36 [ref=3x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 118, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #37 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 354, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #38 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 355, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #39 [ref=1x]
+ { 0 , 356, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #40 [ref=1x]
+ { 0 , 357, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #41 [ref=1x]
+ { 0 , 249, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #42 [ref=1x]
+ { F(Mmx)|F(Vec) , 358, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #43 [ref=2x]
+ { F(Mmx)|F(Vec) , 359, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #44 [ref=2x]
+ { F(Mmx)|F(Vec) , 360, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #45 [ref=2x]
+ { F(Vec) , 361, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #46 [ref=2x]
+ { F(Vec) , 362, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #47 [ref=2x]
+ { F(Vec) , 363, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #48 [ref=2x]
+ { 0 , 364, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #49 [ref=1x]
+ { 0 , 365, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #50 [ref=2x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 251, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #51 [ref=2x]
+ { 0 , 39 , 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #52 [ref=3x]
+ { F(Mmx) , 257, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #53 [ref=1x]
+ { 0 , 253, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #54 [ref=2x]
+ { 0 , 366, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #55 [ref=1x]
+ { F(Vec) , 367, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #56 [ref=2x]
+ { F(Vec) , 255, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #57 [ref=1x]
+ { F(FpuM32)|F(FpuM64) , 153, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #58 [ref=6x]
+ { 0 , 257, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #59 [ref=9x]
+ { F(FpuM80) , 368, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #60 [ref=2x]
+ { 0 , 258, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #61 [ref=13x]
+ { F(FpuM32)|F(FpuM64) , 259, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #62 [ref=2x]
+ { F(FpuM16)|F(FpuM32) , 369, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #63 [ref=9x]
+ { F(FpuM16)|F(FpuM32)|F(FpuM64) , 370, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #64 [ref=3x]
+ { F(FpuM32)|F(FpuM64)|F(FpuM80) , 371, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #65 [ref=2x]
+ { F(FpuM16) , 372, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #66 [ref=3x]
+ { F(FpuM16) , 373, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #67 [ref=2x]
+ { F(FpuM32)|F(FpuM64) , 260, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #68 [ref=1x]
+ { 0 , 374, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #69 [ref=2x]
+ { 0 , 39 , 10, CONTROL(None) , SINGLE_REG(None), 0 }, // #70 [ref=1x]
+ { 0 , 375, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #71 [ref=1x]
+ { F(Rep) , 376, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #72 [ref=1x]
+ { F(Vec) , 261, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #73 [ref=1x]
+ { 0 , 377, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #74 [ref=2x]
+ { 0 , 378, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #75 [ref=8x]
+ { 0 , 263, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #76 [ref=3x]
+ { 0 , 265, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #77 [ref=1x]
+ { 0 , 257, 1 , CONTROL(Return) , SINGLE_REG(None), 0 }, // #78 [ref=3x]
+ { 0 , 379, 1 , CONTROL(Return) , SINGLE_REG(None), 0 }, // #79 [ref=1x]
+ { F(Rep)|F(RepIgnored) , 267, 2 , CONTROL(Branch) , SINGLE_REG(None), 0 }, // #80 [ref=30x]
+ { F(Rep)|F(RepIgnored) , 269, 2 , CONTROL(Branch) , SINGLE_REG(None), 0 }, // #81 [ref=1x]
+ { F(Rep)|F(RepIgnored) , 271, 2 , CONTROL(Jump) , SINGLE_REG(None), 0 }, // #82 [ref=1x]
+ { F(Vec)|F(Vex) , 380, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #83 [ref=27x]
+ { F(Vec)|F(Vex) , 273, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #84 [ref=1x]
+ { F(Vec)|F(Vex) , 275, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #85 [ref=1x]
+ { F(Vec)|F(Vex) , 277, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #86 [ref=1x]
+ { F(Vec)|F(Vex) , 279, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #87 [ref=1x]
+ { F(Vec)|F(Vex) , 381, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #88 [ref=12x]
+ { F(Vec)|F(Vex) , 382, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #89 [ref=8x]
+ { 0 , 383, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #90 [ref=2x]
+ { 0 , 281, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #91 [ref=1x]
+ { F(Vec) , 192, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #92 [ref=2x]
+ { 0 , 384, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #93 [ref=2x]
+ { 0 , 283, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #94 [ref=2x]
+ { 0 , 385, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #95 [ref=1x]
+ { 0 , 156, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #96 [ref=3x]
+ { 0 , 386, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #97 [ref=5x]
+ { F(Vex) , 387, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #98 [ref=2x]
+ { F(Rep) , 388, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #99 [ref=1x]
+ { 0 , 269, 2 , CONTROL(Branch) , SINGLE_REG(None), 0 }, // #100 [ref=3x]
+ { 0 , 285, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #101 [ref=1x]
+ { F(Vex) , 389, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #102 [ref=2x]
+ { F(Vec) , 390, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #103 [ref=1x]
+ { F(Mmx) , 391, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #104 [ref=1x]
+ { 0 , 392, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #105 [ref=2x]
+ { F(XRelease) , 0 , 16, CONTROL(None) , SINGLE_REG(None), 0 }, // #106 [ref=1x]
+ { F(Vec) , 70 , 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #107 [ref=6x]
+ { 0 , 64 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #108 [ref=1x]
+ { F(Mmx)|F(Vec) , 287, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #109 [ref=1x]
+ { 0 , 393, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #110 [ref=1x]
+ { 0 , 68 , 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #111 [ref=2x]
+ { F(Mmx)|F(Vec) , 394, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #112 [ref=1x]
+ { F(Vec) , 256, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #113 [ref=2x]
+ { F(Vec) , 198, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #114 [ref=4x]
+ { F(Vec) , 395, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #115 [ref=2x]
+ { F(Vec) , 71 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #116 [ref=3x]
+ { F(Mmx) , 396, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #117 [ref=1x]
+ { F(Vec) , 98 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #118 [ref=1x]
+ { F(Vec) , 201, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #119 [ref=1x]
+ { F(Mmx)|F(Vec) , 94 , 5 , CONTROL(None) , SINGLE_REG(None), 0 }, // #120 [ref=1x]
+ { F(Mmx)|F(Vec) , 397, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #121 [ref=1x]
+ { F(Rep) , 398, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #122 [ref=1x]
+ { F(Vec) , 97 , 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #123 [ref=1x]
+ { F(Vec) , 289, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #124 [ref=1x]
+ { 0 , 291, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #125 [ref=2x]
+ { 0 , 399, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #126 [ref=1x]
+ { F(Vex) , 293, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #127 [ref=1x]
+ { 0 , 400, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #128 [ref=1x]
+ { 0 , 401, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #129 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 252, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #130 [ref=2x]
+ { 0 , 295, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #131 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 16 , 12, CONTROL(None) , SINGLE_REG(RO) , 0 }, // #132 [ref=1x]
+ { 0 , 402, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #133 [ref=1x]
+ { F(Rep) , 403, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #134 [ref=1x]
+ { F(Mmx)|F(Vec) , 297, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #135 [ref=40x]
+ { F(Mmx)|F(Vec) , 299, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #136 [ref=1x]
+ { F(Mmx)|F(Vec) , 297, 2 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #137 [ref=6x]
+ { F(Mmx)|F(Vec) , 297, 2 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #138 [ref=16x]
+ { F(Mmx) , 297, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #139 [ref=26x]
+ { F(Vec) , 70 , 1 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #140 [ref=4x]
+ { F(Vec) , 404, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #141 [ref=1x]
+ { F(Vec) , 405, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #142 [ref=1x]
+ { F(Vec) , 406, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #143 [ref=1x]
+ { F(Vec) , 407, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #144 [ref=1x]
+ { F(Vec) , 408, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #145 [ref=1x]
+ { F(Vec) , 409, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #146 [ref=1x]
+ { F(Mmx)|F(Vec) , 301, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #147 [ref=1x]
+ { F(Vec) , 410, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #148 [ref=1x]
+ { F(Vec) , 411, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #149 [ref=1x]
+ { F(Vec) , 412, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #150 [ref=1x]
+ { F(Mmx)|F(Vec) , 413, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #151 [ref=1x]
+ { F(Mmx)|F(Vec) , 414, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #152 [ref=1x]
+ { F(Vec) , 225, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #153 [ref=2x]
+ { 0 , 122, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #154 [ref=1x]
+ { 0 , 379, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #155 [ref=6x]
+ { F(Mmx) , 299, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #156 [ref=1x]
+ { F(Mmx)|F(Vec) , 303, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #157 [ref=8x]
+ { F(Vec) , 415, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #158 [ref=2x]
+ { 0 , 126, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #159 [ref=1x]
+ { 0 , 416, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #160 [ref=8x]
+ { 0 , 417, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #161 [ref=4x]
+ { 0 , 418, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #162 [ref=6x]
+ { 0 , 305, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #163 [ref=1x]
+ { F(Rep)|F(RepIgnored) , 307, 2 , CONTROL(Return) , SINGLE_REG(None), 0 }, // #164 [ref=1x]
+ { F(Vex) , 309, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #165 [ref=1x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 16 , 12, CONTROL(None) , SINGLE_REG(WO) , 0 }, // #166 [ref=3x]
+ { F(Rep) , 419, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #167 [ref=1x]
+ { 0 , 420, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #168 [ref=30x]
+ { 0 , 159, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #169 [ref=2x]
+ { 0 , 421, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #170 [ref=3x]
+ { F(Rep) , 422, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #171 [ref=1x]
+ { 0 , 57 , 7 , CONTROL(None) , SINGLE_REG(None), 0 }, // #172 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512T4X)|F(Avx512KZ) , 423, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #173 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512T4X)|F(Avx512KZ) , 424, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #174 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #175 [ref=22x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #176 [ref=22x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE) , 425, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #177 [ref=18x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #178 [ref=17x]
+ { F(Vec)|F(Vex) , 162, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #179 [ref=15x]
+ { F(Vec)|F(Vex)|F(Evex) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #180 [ref=5x]
+ { F(Vec)|F(Vex) , 70 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #181 [ref=17x]
+ { F(Vec)|F(Vex) , 183, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #182 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #183 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #184 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #185 [ref=10x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #186 [ref=12x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #187 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #188 [ref=6x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #189 [ref=13x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #190 [ref=16x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #191 [ref=19x]
+ { F(Vec)|F(Vex) , 165, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #192 [ref=6x]
+ { F(Vec)|F(Vex) , 311, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #193 [ref=3x]
+ { F(Vec)|F(Vex) , 427, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #194 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 428, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #195 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 429, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #196 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 430, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #197 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 431, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #198 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 428, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #199 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 432, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #200 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B64) , 168, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #201 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B32) , 168, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #202 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 433, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #203 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 434, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #204 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512SAE) , 97 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #205 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512SAE) , 222, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #206 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 171, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #207 [ref=6x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #208 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #209 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 313, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #210 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 313, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #211 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #212 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 313, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #213 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #214 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #215 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 180, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #216 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #217 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #218 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512ER_SAE) , 361, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #219 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512ER_SAE) , 361, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #220 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512ER_SAE) , 435, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #221 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #222 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512ER_SAE) , 363, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #223 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512ER_SAE) , 363, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #224 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B64) , 313, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #225 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B64) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #226 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B64) , 313, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #227 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B32) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #228 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B32) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #229 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B32) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #230 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512SAE) , 361, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #231 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512SAE) , 361, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #232 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512SAE) , 363, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #233 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512SAE) , 363, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #234 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #235 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512ER_SAE) , 435, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #236 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #237 [ref=3x]
+ { F(Vec)|F(Vex) , 165, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #238 [ref=9x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B64) , 74 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #239 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B32) , 74 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #240 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #241 [ref=9x]
+ { F(Vec)|F(Vex) , 181, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #242 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 436, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #243 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 182, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #244 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex) , 367, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #245 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B64) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #246 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B32) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #247 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE) , 437, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #248 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE) , 438, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #249 [ref=4x]
+ { F(Vec)|F(Vex) , 130, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #250 [ref=13x]
+ { F(Vec)|F(Vex) , 315, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #251 [ref=4x]
+ { F(Vec)|F(Vex) , 317, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #252 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512K_B64) , 439, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #253 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512K_B32) , 439, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #254 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512K) , 440, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #255 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512K) , 441, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #256 [ref=1x]
+ { F(Vec)|F(Vex) , 177, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #257 [ref=7x]
+ { F(Vec)|F(Vex) , 97 , 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #258 [ref=1x]
+ { F(Vec)|F(Vex) , 222, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #259 [ref=1x]
+ { F(Vec)|F(Vsib)|F(Vex)|F(Evex)|F(Avx512K) , 99 , 5 , CONTROL(None) , SINGLE_REG(None), 0 }, // #260 [ref=2x]
+ { F(Vec)|F(Vsib)|F(Vex)|F(Evex)|F(Avx512K) , 104, 5 , CONTROL(None) , SINGLE_REG(None), 0 }, // #261 [ref=2x]
+ { F(Vsib)|F(Evex)|F(Avx512K) , 442, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #262 [ref=4x]
+ { F(Vsib)|F(Evex)|F(Avx512K) , 443, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #263 [ref=4x]
+ { F(Vsib)|F(Evex)|F(Avx512K) , 444, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #264 [ref=8x]
+ { F(Vec)|F(Vsib)|F(Vex)|F(Evex)|F(Avx512K) , 109, 5 , CONTROL(None) , SINGLE_REG(None), 0 }, // #265 [ref=2x]
+ { F(Vec)|F(Vsib)|F(Vex)|F(Evex)|F(Avx512K) , 134, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #266 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE) , 425, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #267 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #268 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B64) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #269 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_SAE_B32) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #270 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #271 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #272 [ref=22x]
+ { F(Vec)|F(Vex) , 319, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #273 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 319, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #274 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 445, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #275 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex) , 438, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #276 [ref=1x]
+ { F(Vec)|F(Vex) , 192, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #277 [ref=1x]
+ { F(Vex) , 384, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #278 [ref=2x]
+ { F(Vec)|F(Vex) , 390, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #279 [ref=1x]
+ { F(Vec)|F(Vex) , 138, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #280 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #281 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #282 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_SAE) , 425, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #283 [ref=2x]
+ { 0 , 446, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #284 [ref=4x]
+ { 0 , 321, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #285 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 70 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #286 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex) , 323, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #287 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 186, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #288 [ref=1x]
+ { F(Vec)|F(Vex) , 70 , 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #289 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 70 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #290 [ref=6x]
+ { F(Vec)|F(Vex)|F(Evex) , 200, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #291 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex) , 325, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #292 [ref=4x]
+ { F(Vec)|F(Vex) , 447, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #293 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex) , 189, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #294 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex) , 192, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #295 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex) , 195, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #296 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 198, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #297 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #298 [ref=5x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 201, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #299 [ref=1x]
+ { 0 , 327, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #300 [ref=1x]
+ { 0 , 329, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #301 [ref=1x]
+ { F(Vec)|F(Vex) , 162, 2 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #302 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #303 [ref=2x]
+ { F(Vec)|F(Vex) , 162, 2 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #304 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #305 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #306 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #307 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 448, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #308 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 449, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #309 [ref=1x]
+ { F(Vec)|F(Evex) , 450, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #310 [ref=6x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 204, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #311 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 451, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #312 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #313 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512K) , 207, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #314 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512K_B32) , 207, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #315 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512K) , 210, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #316 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512K_B32) , 210, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #317 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512K_B64) , 210, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #318 [ref=2x]
+ { F(Vec)|F(Vex) , 404, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #319 [ref=1x]
+ { F(Vec)|F(Vex) , 405, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #320 [ref=1x]
+ { F(Vec)|F(Vex) , 406, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #321 [ref=1x]
+ { F(Vec)|F(Vex) , 407, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #322 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512K_B64) , 207, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #323 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #324 [ref=6x]
+ { F(Vec)|F(Vex) , 166, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #325 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 163, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #326 [ref=2x]
+ { F(Vec)|F(Vex) , 142, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #327 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 76 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #328 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 146, 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #329 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex) , 408, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #330 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex) , 409, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #331 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex) , 452, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #332 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 453, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #333 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 454, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #334 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 455, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #335 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 456, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #336 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #337 [ref=4x]
+ { F(Vec)|F(Vex) , 311, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #338 [ref=12x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 162, 3 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #339 [ref=8x]
+ { F(Vec)|F(Evex) , 457, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #340 [ref=4x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 213, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #341 [ref=6x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 216, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #342 [ref=9x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 219, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #343 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 222, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #344 [ref=4x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 225, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #345 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 174, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #346 [ref=6x]
+ { F(Vec)|F(Vex) , 130, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #347 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #348 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #349 [ref=3x]
+ { F(Vec)|F(Vex) , 331, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #350 [ref=4x]
+ { F(Vec)|F(Vsib)|F(Evex)|F(Avx512K) , 228, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #351 [ref=3x]
+ { F(Vec)|F(Vsib)|F(Evex)|F(Avx512K) , 333, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #352 [ref=2x]
+ { F(Vec)|F(Vsib)|F(Evex)|F(Avx512K) , 231, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #353 [ref=2x]
+ { F(Vec)|F(Vex) , 335, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #354 [ref=8x]
+ { F(Vec)|F(Evex)|F(Avx512K) , 234, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #355 [ref=5x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #356 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #357 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 82 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #358 [ref=3x]
+ { F(Vec)|F(Vex)|F(Evex) , 183, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #359 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 82 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #360 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 82 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #361 [ref=3x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 88 , 6 , CONTROL(None) , SINGLE_REG(None), 0 }, // #362 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ) , 162, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #363 [ref=6x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #364 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(WO) , 0 }, // #365 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512K_B32) , 234, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #366 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512K_B64) , 234, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #367 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 425, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #368 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #369 [ref=2x]
+ { F(Vec)|F(Vex) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #370 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 437, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #371 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ) , 438, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #372 [ref=1x]
+ { F(Vec)|F(Vex) , 183, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #373 [ref=2x]
+ { F(Vec)|F(Vex) , 437, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #374 [ref=1x]
+ { F(Vec)|F(Vex) , 438, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #375 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #376 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE_B32) , 162, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #377 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE) , 425, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #378 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_ER_SAE) , 426, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #379 [ref=1x]
+ { F(Vec)|F(Vsib)|F(Evex)|F(Avx512K) , 337, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #380 [ref=1x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B32) , 166, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #381 [ref=2x]
+ { F(Vec)|F(Evex)|F(Avx512KZ_B64) , 166, 2 , CONTROL(None) , SINGLE_REG(None), 0 }, // #382 [ref=2x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B32) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #383 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_B64) , 165, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #384 [ref=1x]
+ { F(Vec)|F(Vex)|F(Evex)|F(Avx512KZ_ER_SAE_B64) , 177, 3 , CONTROL(None) , SINGLE_REG(None), 0 }, // #385 [ref=1x]
+ { F(Vec)|F(Vex) , 257, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #386 [ref=2x]
+ { F(Lock)|F(XAcquire)|F(XRelease) , 49 , 4 , CONTROL(None) , SINGLE_REG(None), 0 }, // #387 [ref=1x]
+ { 0 , 458, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #388 [ref=1x]
+ { F(Lock)|F(XAcquire) , 49 , 8 , CONTROL(None) , SINGLE_REG(RO) , 0 }, // #389 [ref=1x]
+ { 0 , 459, 1 , CONTROL(None) , SINGLE_REG(None), 0 }, // #390 [ref=6x]
+ { 0 , 460, 1 , CONTROL(None) , SINGLE_REG(None), 0 } // #391 [ref=6x]
+};
+#undef SINGLE_REG
+#undef CONTROL
+#undef F
+// ----------------------------------------------------------------------------
+// ${InstCommonTable:End}
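+
+// NOTE (editor): each `_commonInfoTable` row above packs per-instruction
+// metadata: an instruction-flags bitmask (the F(...) terms), an index into
+// `_instSignatureTable` plus the number of signature rows the instruction
+// owns, the control-flow category (CONTROL(Branch), CONTROL(Return),
+// CONTROL(None), ...), and the behavior when both operands name the same
+// register (SINGLE_REG(WO) marks idioms such as zeroing a register by
+// XOR-ing it with itself; SINGLE_REG(RO) marks compare-like self-uses).
+// A minimal consumer sketch, with the accessor name assumed rather than
+// quoted from asmjit's headers:
+//
+//   const InstDB::CommonInfo& ci = InstDB::_commonInfoTable[i];
+//   // `controlType()` stands in for whatever accessor exposes the
+//   // CONTROL(...) column; Branch/Return values end a basic block.
+//   bool endsBlock = ci.controlType() != 0;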
+
+// ============================================================================
+// [asmjit::x86::InstDB - CommonInfoTableB]
+// ============================================================================
+
+// ${InstCommonInfoTableB:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+#define EXT(VAL) uint32_t(Features::k##VAL)
+const InstDB::CommonInfoTableB InstDB::_commonInfoTableB[] = {
+ { { 0 }, 0, 0 }, // #0 [ref=144x]
+ { { 0 }, 1, 0 }, // #1 [ref=32x]
+ { { 0 }, 2, 0 }, // #2 [ref=2x]
+ { { EXT(ADX) }, 3, 0 }, // #3 [ref=1x]
+ { { EXT(SSE2) }, 0, 0 }, // #4 [ref=65x]
+ { { EXT(SSE) }, 0, 0 }, // #5 [ref=44x]
+ { { EXT(SSE3) }, 0, 0 }, // #6 [ref=12x]
+ { { EXT(ADX) }, 4, 0 }, // #7 [ref=1x]
+ { { EXT(AESNI) }, 0, 0 }, // #8 [ref=6x]
+ { { EXT(BMI) }, 1, 0 }, // #9 [ref=6x]
+ { { 0 }, 5, 0 }, // #10 [ref=5x]
+ { { EXT(TBM) }, 0, 0 }, // #11 [ref=9x]
+ { { EXT(SSE4_1) }, 0, 0 }, // #12 [ref=47x]
+ { { EXT(MPX) }, 0, 0 }, // #13 [ref=7x]
+ { { 0 }, 6, 0 }, // #14 [ref=4x]
+ { { EXT(BMI2) }, 1, 0 }, // #15 [ref=1x]
+ { { EXT(SMAP) }, 7, 0 }, // #16 [ref=2x]
+ { { 0 }, 8, 0 }, // #17 [ref=2x]
+ { { 0 }, 9, 0 }, // #18 [ref=2x]
+ { { EXT(CLDEMOTE) }, 0, 0 }, // #19 [ref=1x]
+ { { EXT(CLFLUSH) }, 0, 0 }, // #20 [ref=1x]
+ { { EXT(CLFLUSHOPT) }, 0, 0 }, // #21 [ref=1x]
+ { { EXT(SVM) }, 0, 0 }, // #22 [ref=6x]
+ { { 0 }, 10, 0 }, // #23 [ref=2x]
+ { { EXT(CLWB) }, 0, 0 }, // #24 [ref=1x]
+ { { EXT(CLZERO) }, 0, 0 }, // #25 [ref=1x]
+ { { 0 }, 3, 0 }, // #26 [ref=1x]
+ { { EXT(CMOV) }, 11, 0 }, // #27 [ref=6x]
+ { { EXT(CMOV) }, 12, 0 }, // #28 [ref=8x]
+ { { EXT(CMOV) }, 13, 0 }, // #29 [ref=6x]
+ { { EXT(CMOV) }, 14, 0 }, // #30 [ref=4x]
+ { { EXT(CMOV) }, 15, 0 }, // #31 [ref=4x]
+ { { EXT(CMOV) }, 16, 0 }, // #32 [ref=2x]
+ { { EXT(CMOV) }, 17, 0 }, // #33 [ref=6x]
+ { { EXT(CMOV) }, 18, 0 }, // #34 [ref=2x]
+ { { 0 }, 19, 0 }, // #35 [ref=2x]
+ { { EXT(I486) }, 1, 0 }, // #36 [ref=2x]
+ { { EXT(CMPXCHG16B) }, 5, 0 }, // #37 [ref=1x]
+ { { EXT(CMPXCHG8B) }, 5, 0 }, // #38 [ref=1x]
+ { { EXT(SSE2) }, 1, 0 }, // #39 [ref=2x]
+ { { EXT(SSE) }, 1, 0 }, // #40 [ref=2x]
+ { { EXT(I486) }, 0, 0 }, // #41 [ref=4x]
+ { { EXT(SSE4_2) }, 0, 0 }, // #42 [ref=2x]
+ { { 0 }, 20, 0 }, // #43 [ref=2x]
+ { { EXT(MMX) }, 0, 0 }, // #44 [ref=1x]
+ { { EXT(ENQCMD) }, 0, 0 }, // #45 [ref=2x]
+ { { EXT(SSE4A) }, 0, 0 }, // #46 [ref=4x]
+ { { 0 }, 21, 0 }, // #47 [ref=4x]
+ { { EXT(3DNOW) }, 0, 0 }, // #48 [ref=21x]
+ { { EXT(FXSR) }, 0, 0 }, // #49 [ref=4x]
+ { { EXT(SMX) }, 0, 0 }, // #50 [ref=1x]
+ { { EXT(GFNI) }, 0, 0 }, // #51 [ref=3x]
+ { { 0 }, 16, 0 }, // #52 [ref=5x]
+ { { EXT(VMX) }, 0, 0 }, // #53 [ref=12x]
+ { { 0 }, 11, 0 }, // #54 [ref=8x]
+ { { 0 }, 12, 0 }, // #55 [ref=12x]
+ { { 0 }, 13, 0 }, // #56 [ref=10x]
+ { { 0 }, 14, 0 }, // #57 [ref=8x]
+ { { 0 }, 15, 0 }, // #58 [ref=8x]
+ { { 0 }, 17, 0 }, // #59 [ref=8x]
+ { { 0 }, 18, 0 }, // #60 [ref=4x]
+ { { EXT(AVX512_DQ) }, 0, 0 }, // #61 [ref=23x]
+ { { EXT(AVX512_BW) }, 0, 0 }, // #62 [ref=22x]
+ { { EXT(AVX512_F) }, 0, 0 }, // #63 [ref=37x]
+ { { EXT(AVX512_DQ) }, 1, 0 }, // #64 [ref=3x]
+ { { EXT(AVX512_BW) }, 1, 0 }, // #65 [ref=4x]
+ { { EXT(AVX512_F) }, 1, 0 }, // #66 [ref=1x]
+ { { EXT(LAHFSAHF) }, 22, 0 }, // #67 [ref=1x]
+ { { EXT(LWP) }, 0, 0 }, // #68 [ref=4x]
+ { { 0 }, 23, 0 }, // #69 [ref=3x]
+ { { EXT(LZCNT) }, 1, 0 }, // #70 [ref=1x]
+ { { EXT(MMX2) }, 0, 0 }, // #71 [ref=8x]
+ { { EXT(MONITOR) }, 0, 0 }, // #72 [ref=2x]
+ { { EXT(MONITORX) }, 0, 0 }, // #73 [ref=2x]
+ { { EXT(MOVBE) }, 0, 0 }, // #74 [ref=1x]
+ { { EXT(MMX), EXT(SSE2) }, 0, 0 }, // #75 [ref=46x]
+ { { EXT(MOVDIR64B) }, 0, 0 }, // #76 [ref=1x]
+ { { EXT(MOVDIRI) }, 0, 0 }, // #77 [ref=1x]
+ { { EXT(BMI2) }, 0, 0 }, // #78 [ref=7x]
+ { { EXT(SSSE3) }, 0, 0 }, // #79 [ref=15x]
+ { { EXT(MMX2), EXT(SSE2) }, 0, 0 }, // #80 [ref=10x]
+ { { EXT(PCLMULQDQ) }, 0, 0 }, // #81 [ref=1x]
+ { { EXT(SSE4_2) }, 1, 0 }, // #82 [ref=4x]
+ { { EXT(PCOMMIT) }, 0, 0 }, // #83 [ref=1x]
+ { { EXT(MMX2), EXT(SSE2), EXT(SSE4_1) }, 0, 0 }, // #84 [ref=1x]
+ { { EXT(3DNOW2) }, 0, 0 }, // #85 [ref=5x]
+ { { EXT(GEODE) }, 0, 0 }, // #86 [ref=2x]
+ { { EXT(POPCNT) }, 1, 0 }, // #87 [ref=1x]
+ { { 0 }, 24, 0 }, // #88 [ref=3x]
+ { { EXT(PREFETCHW) }, 1, 0 }, // #89 [ref=1x]
+ { { EXT(PREFETCHWT1) }, 1, 0 }, // #90 [ref=1x]
+ { { EXT(SSE4_1) }, 1, 0 }, // #91 [ref=1x]
+ { { 0 }, 25, 0 }, // #92 [ref=3x]
+ { { 0 }, 26, 0 }, // #93 [ref=2x]
+ { { EXT(FSGSBASE) }, 0, 0 }, // #94 [ref=4x]
+ { { EXT(MSR) }, 0, 0 }, // #95 [ref=2x]
+ { { EXT(RDPID) }, 0, 0 }, // #96 [ref=1x]
+ { { EXT(RDRAND) }, 1, 0 }, // #97 [ref=1x]
+ { { EXT(RDSEED) }, 1, 0 }, // #98 [ref=1x]
+ { { EXT(RDTSC) }, 0, 0 }, // #99 [ref=1x]
+ { { EXT(RDTSCP) }, 0, 0 }, // #100 [ref=1x]
+ { { 0 }, 27, 0 }, // #101 [ref=2x]
+ { { EXT(LAHFSAHF) }, 28, 0 }, // #102 [ref=1x]
+ { { EXT(SHA) }, 0, 0 }, // #103 [ref=7x]
+ { { EXT(SKINIT) }, 0, 0 }, // #104 [ref=2x]
+ { { EXT(AVX512_4FMAPS) }, 0, 0 }, // #105 [ref=4x]
+ { { EXT(AVX), EXT(AVX512_F), EXT(AVX512_VL) }, 0, 0 }, // #106 [ref=46x]
+ { { EXT(AVX), EXT(AVX512_F) }, 0, 0 }, // #107 [ref=32x]
+ { { EXT(AVX) }, 0, 0 }, // #108 [ref=37x]
+ { { EXT(AESNI), EXT(AVX), EXT(AVX512_F), EXT(AVX512_VL), EXT(VAES) }, 0, 0 }, // #109 [ref=4x]
+ { { EXT(AESNI), EXT(AVX) }, 0, 0 }, // #110 [ref=2x]
+ { { EXT(AVX512_F), EXT(AVX512_VL) }, 0, 0 }, // #111 [ref=112x]
+ { { EXT(AVX), EXT(AVX512_DQ), EXT(AVX512_VL) }, 0, 0 }, // #112 [ref=8x]
+ { { EXT(AVX512_BW), EXT(AVX512_VL) }, 0, 0 }, // #113 [ref=26x]
+ { { EXT(AVX512_DQ), EXT(AVX512_VL) }, 0, 0 }, // #114 [ref=30x]
+ { { EXT(AVX2) }, 0, 0 }, // #115 [ref=7x]
+ { { EXT(AVX), EXT(AVX2), EXT(AVX512_F), EXT(AVX512_VL) }, 0, 0 }, // #116 [ref=39x]
+ { { EXT(AVX), EXT(AVX512_F) }, 1, 0 }, // #117 [ref=4x]
+ { { EXT(AVX512_BF16), EXT(AVX512_VL) }, 0, 0 }, // #118 [ref=3x]
+ { { EXT(AVX512_F), EXT(AVX512_VL), EXT(F16C) }, 0, 0 }, // #119 [ref=2x]
+ { { EXT(AVX512_ERI) }, 0, 0 }, // #120 [ref=10x]
+ { { EXT(AVX512_F), EXT(AVX512_VL), EXT(FMA) }, 0, 0 }, // #121 [ref=36x]
+ { { EXT(AVX512_F), EXT(FMA) }, 0, 0 }, // #122 [ref=24x]
+ { { EXT(FMA4) }, 0, 0 }, // #123 [ref=20x]
+ { { EXT(XOP) }, 0, 0 }, // #124 [ref=55x]
+ { { EXT(AVX2), EXT(AVX512_F), EXT(AVX512_VL) }, 0, 0 }, // #125 [ref=19x]
+ { { EXT(AVX512_PFI) }, 0, 0 }, // #126 [ref=16x]
+ { { EXT(AVX), EXT(AVX512_F), EXT(AVX512_VL), EXT(GFNI) }, 0, 0 }, // #127 [ref=3x]
+ { { EXT(AVX), EXT(AVX2) }, 0, 0 }, // #128 [ref=17x]
+ { { EXT(AVX512_4VNNIW) }, 0, 0 }, // #129 [ref=2x]
+ { { EXT(AVX), EXT(AVX2), EXT(AVX512_BW), EXT(AVX512_VL) }, 0, 0 }, // #130 [ref=54x]
+ { { EXT(AVX2), EXT(AVX512_BW), EXT(AVX512_VL) }, 0, 0 }, // #131 [ref=2x]
+ { { EXT(AVX512_CDI), EXT(AVX512_VL) }, 0, 0 }, // #132 [ref=6x]
+ { { EXT(AVX), EXT(AVX512_F), EXT(AVX512_VL), EXT(PCLMULQDQ), EXT(VPCLMULQDQ) }, 0, 0 }, // #133 [ref=1x]
+ { { EXT(AVX) }, 1, 0 }, // #134 [ref=7x]
+ { { EXT(AVX512_VBMI2), EXT(AVX512_VL) }, 0, 0 }, // #135 [ref=16x]
+ { { EXT(AVX512_VL), EXT(AVX512_VNNI) }, 0, 0 }, // #136 [ref=4x]
+ { { EXT(AVX512_VBMI), EXT(AVX512_VL) }, 0, 0 }, // #137 [ref=4x]
+ { { EXT(AVX), EXT(AVX512_BW) }, 0, 0 }, // #138 [ref=4x]
+ { { EXT(AVX), EXT(AVX512_DQ) }, 0, 0 }, // #139 [ref=4x]
+ { { EXT(AVX512_IFMA), EXT(AVX512_VL) }, 0, 0 }, // #140 [ref=2x]
+ { { EXT(AVX512_BITALG), EXT(AVX512_VL) }, 0, 0 }, // #141 [ref=3x]
+ { { EXT(AVX512_VL), EXT(AVX512_VPOPCNTDQ) }, 0, 0 }, // #142 [ref=2x]
+ { { EXT(WBNOINVD) }, 0, 0 }, // #143 [ref=1x]
+ { { EXT(RTM) }, 0, 0 }, // #144 [ref=3x]
+ { { EXT(XSAVE) }, 0, 0 }, // #145 [ref=6x]
+ { { EXT(XSAVES) }, 0, 0 }, // #146 [ref=4x]
+ { { EXT(XSAVEC) }, 0, 0 }, // #147 [ref=2x]
+ { { EXT(XSAVEOPT) }, 0, 0 }, // #148 [ref=2x]
+ { { EXT(TSX) }, 1, 0 } // #149 [ref=1x]
+};
+#undef EXT
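+
+// NOTE (editor): each `_commonInfoTableB` row pairs the CPU feature set an
+// instruction belongs to (the EXT(...) terms) with an index into
+// `_rwFlagsInfoTable` below; the trailing 0 is padding. For example, the
+// CMOV rows #27..#34 select flag rows #11..#18, which encode exactly which
+// EFLAGS each cmovcc form reads.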
+
+#define FLAG(VAL) uint32_t(Status::k##VAL)
+const InstDB::RWFlagsInfoTable InstDB::_rwFlagsInfoTable[] = {
+ { 0, 0 }, // #0 [ref=1281x]
+ { 0, FLAG(AF) | FLAG(CF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) }, // #1 [ref=76x]
+ { FLAG(CF), FLAG(AF) | FLAG(CF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) }, // #2 [ref=2x]
+ { FLAG(CF), FLAG(CF) }, // #3 [ref=2x]
+ { FLAG(OF), FLAG(OF) }, // #4 [ref=1x]
+ { 0, FLAG(ZF) }, // #5 [ref=7x]
+ { 0, FLAG(AF) | FLAG(CF) | FLAG(OF) | FLAG(PF) | FLAG(SF) }, // #6 [ref=4x]
+ { 0, FLAG(AC) }, // #7 [ref=2x]
+ { 0, FLAG(CF) }, // #8 [ref=2x]
+ { 0, FLAG(DF) }, // #9 [ref=2x]
+ { 0, FLAG(IF) }, // #10 [ref=2x]
+ { FLAG(CF) | FLAG(ZF), 0 }, // #11 [ref=14x]
+ { FLAG(CF), 0 }, // #12 [ref=20x]
+ { FLAG(ZF), 0 }, // #13 [ref=16x]
+ { FLAG(OF) | FLAG(SF) | FLAG(ZF), 0 }, // #14 [ref=12x]
+ { FLAG(OF) | FLAG(SF), 0 }, // #15 [ref=12x]
+ { FLAG(OF), 0 }, // #16 [ref=7x]
+ { FLAG(PF), 0 }, // #17 [ref=14x]
+ { FLAG(SF), 0 }, // #18 [ref=6x]
+ { FLAG(DF), FLAG(AF) | FLAG(CF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) }, // #19 [ref=2x]
+ { 0, FLAG(AF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) }, // #20 [ref=2x]
+ { 0, FLAG(CF) | FLAG(PF) | FLAG(ZF) }, // #21 [ref=4x]
+ { FLAG(AF) | FLAG(CF) | FLAG(PF) | FLAG(SF) | FLAG(ZF), 0 }, // #22 [ref=1x]
+ { FLAG(DF), 0 }, // #23 [ref=3x]
+ { 0, FLAG(AF) | FLAG(CF) | FLAG(DF) | FLAG(IF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) }, // #24 [ref=3x]
+ { FLAG(AF) | FLAG(CF) | FLAG(DF) | FLAG(IF) | FLAG(OF) | FLAG(PF) | FLAG(SF) | FLAG(ZF), 0 }, // #25 [ref=3x]
+ { FLAG(CF) | FLAG(OF), FLAG(CF) | FLAG(OF) }, // #26 [ref=2x]
+ { 0, FLAG(CF) | FLAG(OF) }, // #27 [ref=2x]
+ { 0, FLAG(AF) | FLAG(CF) | FLAG(PF) | FLAG(SF) | FLAG(ZF) } // #28 [ref=1x]
+};
+#undef FLAG
+// ----------------------------------------------------------------------------
+// ${InstCommonInfoTableB:End}
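+
+// NOTE (editor): each `_rwFlagsInfoTable` row is a (read, written) pair of
+// EFLAGS masks, selected by the second field of the `_commonInfoTableB` rows
+// above. Row #1, shared by ordinary arithmetic instructions, reads nothing
+// and writes AF|CF|OF|PF|SF|ZF; row #3 is used by adcx and cmc, which read
+// and write only CF.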
+
+// ============================================================================
+// [asmjit::x86::InstDB - NameData]
+// ============================================================================
+
+#ifndef ASMJIT_NO_TEXT
+// ${NameData:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+const char InstDB::_nameData[] =
+ "\0" "aaa\0" "aad\0" "aam\0" "aas\0" "adc\0" "adcx\0" "adox\0" "arpl\0" "bextr\0" "blcfill\0" "blci\0" "blcic\0"
+ "blcmsk\0" "blcs\0" "blsfill\0" "blsi\0" "blsic\0" "blsmsk\0" "blsr\0" "bndcl\0" "bndcn\0" "bndcu\0" "bndldx\0"
+ "bndmk\0" "bndmov\0" "bndstx\0" "bound\0" "bsf\0" "bsr\0" "bswap\0" "bt\0" "btc\0" "btr\0" "bts\0" "bzhi\0" "cbw\0"
+ "cdq\0" "cdqe\0" "clac\0" "clc\0" "cld\0" "cldemote\0" "clflush\0" "clflushopt\0" "clgi\0" "cli\0" "clts\0" "clwb\0"
+ "clzero\0" "cmc\0" "cmova\0" "cmovae\0" "cmovc\0" "cmovg\0" "cmovge\0" "cmovl\0" "cmovle\0" "cmovna\0" "cmovnae\0"
+ "cmovnc\0" "cmovng\0" "cmovnge\0" "cmovnl\0" "cmovnle\0" "cmovno\0" "cmovnp\0" "cmovns\0" "cmovnz\0" "cmovo\0"
+ "cmovp\0" "cmovpe\0" "cmovpo\0" "cmovs\0" "cmovz\0" "cmp\0" "cmps\0" "cmpxchg\0" "cmpxchg16b\0" "cmpxchg8b\0"
+ "cpuid\0" "cqo\0" "crc32\0" "cvtpd2pi\0" "cvtpi2pd\0" "cvtpi2ps\0" "cvtps2pi\0" "cvttpd2pi\0" "cvttps2pi\0" "cwd\0"
+ "cwde\0" "daa\0" "das\0" "enqcmd\0" "enqcmds\0" "f2xm1\0" "fabs\0" "faddp\0" "fbld\0" "fbstp\0" "fchs\0" "fclex\0"
+ "fcmovb\0" "fcmovbe\0" "fcmove\0" "fcmovnb\0" "fcmovnbe\0" "fcmovne\0" "fcmovnu\0" "fcmovu\0" "fcom\0" "fcomi\0"
+ "fcomip\0" "fcomp\0" "fcompp\0" "fcos\0" "fdecstp\0" "fdiv\0" "fdivp\0" "fdivr\0" "fdivrp\0" "femms\0" "ffree\0"
+ "fiadd\0" "ficom\0" "ficomp\0" "fidiv\0" "fidivr\0" "fild\0" "fimul\0" "fincstp\0" "finit\0" "fist\0" "fistp\0"
+ "fisttp\0" "fisub\0" "fisubr\0" "fld\0" "fld1\0" "fldcw\0" "fldenv\0" "fldl2e\0" "fldl2t\0" "fldlg2\0" "fldln2\0"
+ "fldpi\0" "fldz\0" "fmulp\0" "fnclex\0" "fninit\0" "fnop\0" "fnsave\0" "fnstcw\0" "fnstenv\0" "fnstsw\0" "fpatan\0"
+ "fprem\0" "fprem1\0" "fptan\0" "frndint\0" "frstor\0" "fsave\0" "fscale\0" "fsin\0" "fsincos\0" "fsqrt\0" "fst\0"
+ "fstcw\0" "fstenv\0" "fstp\0" "fstsw\0" "fsubp\0" "fsubrp\0" "ftst\0" "fucom\0" "fucomi\0" "fucomip\0" "fucomp\0"
+ "fucompp\0" "fwait\0" "fxam\0" "fxch\0" "fxrstor\0" "fxrstor64\0" "fxsave\0" "fxsave64\0" "fxtract\0" "fyl2x\0"
+ "fyl2xp1\0" "getsec\0" "hlt\0" "inc\0" "insertq\0" "int3\0" "into\0" "invept\0" "invlpg\0" "invlpga\0" "invpcid\0"
+ "invvpid\0" "iret\0" "iretd\0" "iretq\0" "iretw\0" "ja\0" "jae\0" "jb\0" "jbe\0" "jc\0" "je\0" "jecxz\0" "jg\0"
+ "jge\0" "jl\0" "jle\0" "jmp\0" "jna\0" "jnae\0" "jnb\0" "jnbe\0" "jnc\0" "jne\0" "jng\0" "jnge\0" "jnl\0" "jnle\0"
+ "jno\0" "jnp\0" "jns\0" "jnz\0" "jo\0" "jp\0" "jpe\0" "jpo\0" "js\0" "jz\0" "kaddb\0" "kaddd\0" "kaddq\0" "kaddw\0"
+ "kandb\0" "kandd\0" "kandnb\0" "kandnd\0" "kandnq\0" "kandnw\0" "kandq\0" "kandw\0" "kmovb\0" "kmovw\0" "knotb\0"
+ "knotd\0" "knotq\0" "knotw\0" "korb\0" "kord\0" "korq\0" "kortestb\0" "kortestd\0" "kortestq\0" "kortestw\0" "korw\0"
+ "kshiftlb\0" "kshiftld\0" "kshiftlq\0" "kshiftlw\0" "kshiftrb\0" "kshiftrd\0" "kshiftrq\0" "kshiftrw\0" "ktestb\0"
+ "ktestd\0" "ktestq\0" "ktestw\0" "kunpckbw\0" "kunpckdq\0" "kunpckwd\0" "kxnorb\0" "kxnord\0" "kxnorq\0" "kxnorw\0"
+ "kxorb\0" "kxord\0" "kxorq\0" "kxorw\0" "lahf\0" "lar\0" "lds\0" "lea\0" "leave\0" "les\0" "lfence\0" "lfs\0"
+ "lgdt\0" "lgs\0" "lidt\0" "lldt\0" "llwpcb\0" "lmsw\0" "lods\0" "loop\0" "loope\0" "loopne\0" "lsl\0" "ltr\0"
+ "lwpins\0" "lwpval\0" "lzcnt\0" "mfence\0" "monitor\0" "monitorx\0" "movdir64b\0" "movdiri\0" "movdq2q\0" "movnti\0"
+ "movntq\0" "movntsd\0" "movntss\0" "movq2dq\0" "movsx\0" "movsxd\0" "movzx\0" "mulx\0" "mwait\0" "mwaitx\0" "neg\0"
+ "not\0" "out\0" "outs\0" "pause\0" "pavgusb\0" "pcommit\0" "pdep\0" "pext\0" "pf2id\0" "pf2iw\0" "pfacc\0" "pfadd\0"
+ "pfcmpeq\0" "pfcmpge\0" "pfcmpgt\0" "pfmax\0" "pfmin\0" "pfmul\0" "pfnacc\0" "pfpnacc\0" "pfrcp\0" "pfrcpit1\0"
+ "pfrcpit2\0" "pfrcpv\0" "pfrsqit1\0" "pfrsqrt\0" "pfrsqrtv\0" "pfsub\0" "pfsubr\0" "pi2fd\0" "pi2fw\0" "pmulhrw\0"
+ "pop\0" "popa\0" "popad\0" "popcnt\0" "popf\0" "popfd\0" "popfq\0" "prefetch\0" "prefetchnta\0" "prefetcht0\0"
+ "prefetcht1\0" "prefetcht2\0" "prefetchw\0" "prefetchwt1\0" "pshufw\0" "pswapd\0" "push\0" "pusha\0" "pushad\0"
+ "pushf\0" "pushfd\0" "pushfq\0" "rcl\0" "rcr\0" "rdfsbase\0" "rdgsbase\0" "rdmsr\0" "rdpid\0" "rdpmc\0" "rdrand\0"
+ "rdseed\0" "rdtsc\0" "rdtscp\0" "rol\0" "ror\0" "rorx\0" "rsm\0" "sahf\0" "sal\0" "sar\0" "sarx\0" "sbb\0" "scas\0"
+ "seta\0" "setae\0" "setb\0" "setbe\0" "setc\0" "sete\0" "setg\0" "setge\0" "setl\0" "setle\0" "setna\0" "setnae\0"
+ "setnb\0" "setnbe\0" "setnc\0" "setne\0" "setng\0" "setnge\0" "setnl\0" "setnle\0" "setno\0" "setnp\0" "setns\0"
+ "setnz\0" "seto\0" "setp\0" "setpe\0" "setpo\0" "sets\0" "setz\0" "sfence\0" "sgdt\0" "sha1msg1\0" "sha1msg2\0"
+ "sha1nexte\0" "sha1rnds4\0" "sha256msg1\0" "sha256msg2\0" "sha256rnds2\0" "shl\0" "shlx\0" "shr\0" "shrd\0" "shrx\0"
+ "sidt\0" "skinit\0" "sldt\0" "slwpcb\0" "smsw\0" "stac\0" "stc\0" "stgi\0" "sti\0" "stos\0" "str\0" "swapgs\0"
+ "syscall\0" "sysenter\0" "sysexit\0" "sysexit64\0" "sysret\0" "sysret64\0" "t1mskc\0" "tzcnt\0" "tzmsk\0" "ud2\0"
+ "v4fmaddps\0" "v4fmaddss\0" "v4fnmaddps\0" "v4fnmaddss\0" "vaddpd\0" "vaddps\0" "vaddsd\0" "vaddss\0" "vaddsubpd\0"
+ "vaddsubps\0" "vaesdec\0" "vaesdeclast\0" "vaesenc\0" "vaesenclast\0" "vaesimc\0" "vaeskeygenassist\0" "valignd\0"
+ "valignq\0" "vandnpd\0" "vandnps\0" "vandpd\0" "vandps\0" "vblendmb\0" "vblendmd\0" "vblendmpd\0" "vblendmps\0"
+ "vblendmq\0" "vblendmw\0" "vblendpd\0" "vblendps\0" "vblendvpd\0" "vblendvps\0" "vbroadcastf128\0"
+ "vbroadcastf32x2\0" "vbroadcastf32x4\0" "vbroadcastf32x8\0" "vbroadcastf64x2\0" "vbroadcastf64x4\0"
+ "vbroadcasti128\0" "vbroadcasti32x2\0" "vbroadcasti32x4\0" "vbroadcasti32x8\0" "vbroadcasti64x2\0"
+ "vbroadcasti64x4\0" "vbroadcastsd\0" "vbroadcastss\0" "vcmppd\0" "vcmpps\0" "vcmpsd\0" "vcmpss\0" "vcomisd\0"
+ "vcomiss\0" "vcompresspd\0" "vcompressps\0" "vcvtdq2pd\0" "vcvtdq2ps\0" "vcvtne2ps2bf16\0" "vcvtneps2bf16\0"
+ "vcvtpd2dq\0" "vcvtpd2ps\0" "vcvtpd2qq\0" "vcvtpd2udq\0" "vcvtpd2uqq\0" "vcvtph2ps\0" "vcvtps2dq\0" "vcvtps2pd\0"
+ "vcvtps2ph\0" "vcvtps2qq\0" "vcvtps2udq\0" "vcvtps2uqq\0" "vcvtqq2pd\0" "vcvtqq2ps\0" "vcvtsd2si\0" "vcvtsd2ss\0"
+ "vcvtsd2usi\0" "vcvtsi2sd\0" "vcvtsi2ss\0" "vcvtss2sd\0" "vcvtss2si\0" "vcvtss2usi\0" "vcvttpd2dq\0" "vcvttpd2qq\0"
+ "vcvttpd2udq\0" "vcvttpd2uqq\0" "vcvttps2dq\0" "vcvttps2qq\0" "vcvttps2udq\0" "vcvttps2uqq\0" "vcvttsd2si\0"
+ "vcvttsd2usi\0" "vcvttss2si\0" "vcvttss2usi\0" "vcvtudq2pd\0" "vcvtudq2ps\0" "vcvtuqq2pd\0" "vcvtuqq2ps\0"
+ "vcvtusi2sd\0" "vcvtusi2ss\0" "vdbpsadbw\0" "vdivpd\0" "vdivps\0" "vdivsd\0" "vdivss\0" "vdpbf16ps\0" "vdppd\0"
+ "vdpps\0" "verr\0" "verw\0" "vexp2pd\0" "vexp2ps\0" "vexpandpd\0" "vexpandps\0" "vextractf128\0" "vextractf32x4\0"
+ "vextractf32x8\0" "vextractf64x2\0" "vextractf64x4\0" "vextracti128\0" "vextracti32x4\0" "vextracti32x8\0"
+ "vextracti64x2\0" "vextracti64x4\0" "vextractps\0" "vfixupimmpd\0" "vfixupimmps\0" "vfixupimmsd\0" "vfixupimmss\0"
+ "vfmadd132pd\0" "vfmadd132ps\0" "vfmadd132sd\0" "vfmadd132ss\0" "vfmadd213pd\0" "vfmadd213ps\0" "vfmadd213sd\0"
+ "vfmadd213ss\0" "vfmadd231pd\0" "vfmadd231ps\0" "vfmadd231sd\0" "vfmadd231ss\0" "vfmaddpd\0" "vfmaddps\0"
+ "vfmaddsd\0" "vfmaddss\0" "vfmaddsub132pd\0" "vfmaddsub132ps\0" "vfmaddsub213pd\0" "vfmaddsub213ps\0"
+ "vfmaddsub231pd\0" "vfmaddsub231ps\0" "vfmaddsubpd\0" "vfmaddsubps\0" "vfmsub132pd\0" "vfmsub132ps\0" "vfmsub132sd\0"
+ "vfmsub132ss\0" "vfmsub213pd\0" "vfmsub213ps\0" "vfmsub213sd\0" "vfmsub213ss\0" "vfmsub231pd\0" "vfmsub231ps\0"
+ "vfmsub231sd\0" "vfmsub231ss\0" "vfmsubadd132pd\0" "vfmsubadd132ps\0" "vfmsubadd213pd\0" "vfmsubadd213ps\0"
+ "vfmsubadd231pd\0" "vfmsubadd231ps\0" "vfmsubaddpd\0" "vfmsubaddps\0" "vfmsubpd\0" "vfmsubps\0" "vfmsubsd\0"
+ "vfmsubss\0" "vfnmadd132pd\0" "vfnmadd132ps\0" "vfnmadd132sd\0" "vfnmadd132ss\0" "vfnmadd213pd\0" "vfnmadd213ps\0"
+ "vfnmadd213sd\0" "vfnmadd213ss\0" "vfnmadd231pd\0" "vfnmadd231ps\0" "vfnmadd231sd\0" "vfnmadd231ss\0" "vfnmaddpd\0"
+ "vfnmaddps\0" "vfnmaddsd\0" "vfnmaddss\0" "vfnmsub132pd\0" "vfnmsub132ps\0" "vfnmsub132sd\0" "vfnmsub132ss\0"
+ "vfnmsub213pd\0" "vfnmsub213ps\0" "vfnmsub213sd\0" "vfnmsub213ss\0" "vfnmsub231pd\0" "vfnmsub231ps\0"
+ "vfnmsub231sd\0" "vfnmsub231ss\0" "vfnmsubpd\0" "vfnmsubps\0" "vfnmsubsd\0" "vfnmsubss\0" "vfpclasspd\0"
+ "vfpclassps\0" "vfpclasssd\0" "vfpclassss\0" "vfrczpd\0" "vfrczps\0" "vfrczsd\0" "vfrczss\0" "vgatherdpd\0"
+ "vgatherdps\0" "vgatherpf0dpd\0" "vgatherpf0dps\0" "vgatherpf0qpd\0" "vgatherpf0qps\0" "vgatherpf1dpd\0"
+ "vgatherpf1dps\0" "vgatherpf1qpd\0" "vgatherpf1qps\0" "vgatherqpd\0" "vgatherqps\0" "vgetexppd\0" "vgetexpps\0"
+ "vgetexpsd\0" "vgetexpss\0" "vgetmantpd\0" "vgetmantps\0" "vgetmantsd\0" "vgetmantss\0" "vgf2p8affineinvqb\0"
+ "vgf2p8affineqb\0" "vgf2p8mulb\0" "vhaddpd\0" "vhaddps\0" "vhsubpd\0" "vhsubps\0" "vinsertf128\0" "vinsertf32x4\0"
+ "vinsertf32x8\0" "vinsertf64x2\0" "vinsertf64x4\0" "vinserti128\0" "vinserti32x4\0" "vinserti32x8\0" "vinserti64x2\0"
+ "vinserti64x4\0" "vinsertps\0" "vlddqu\0" "vldmxcsr\0" "vmaskmovdqu\0" "vmaskmovpd\0" "vmaskmovps\0" "vmaxpd\0"
+ "vmaxps\0" "vmaxsd\0" "vmaxss\0" "vmcall\0" "vmclear\0" "vmfunc\0" "vminpd\0" "vminps\0" "vminsd\0" "vminss\0"
+ "vmlaunch\0" "vmload\0" "vmmcall\0" "vmovapd\0" "vmovaps\0" "vmovd\0" "vmovddup\0" "vmovdqa\0" "vmovdqa32\0"
+ "vmovdqa64\0" "vmovdqu\0" "vmovdqu16\0" "vmovdqu32\0" "vmovdqu64\0" "vmovdqu8\0" "vmovhlps\0" "vmovhpd\0" "vmovhps\0"
+ "vmovlhps\0" "vmovlpd\0" "vmovlps\0" "vmovmskpd\0" "vmovmskps\0" "vmovntdq\0" "vmovntdqa\0" "vmovntpd\0" "vmovntps\0"
+ "vmovq\0" "vmovsd\0" "vmovshdup\0" "vmovsldup\0" "vmovss\0" "vmovupd\0" "vmovups\0" "vmpsadbw\0" "vmptrld\0"
+ "vmptrst\0" "vmread\0" "vmresume\0" "vmrun\0" "vmsave\0" "vmulpd\0" "vmulps\0" "vmulsd\0" "vmulss\0" "vmwrite\0"
+ "vmxon\0" "vorpd\0" "vorps\0" "vp4dpwssd\0" "vp4dpwssds\0" "vpabsb\0" "vpabsd\0" "vpabsq\0" "vpabsw\0" "vpackssdw\0"
+ "vpacksswb\0" "vpackusdw\0" "vpackuswb\0" "vpaddb\0" "vpaddd\0" "vpaddq\0" "vpaddsb\0" "vpaddsw\0" "vpaddusb\0"
+ "vpaddusw\0" "vpaddw\0" "vpalignr\0" "vpand\0" "vpandd\0" "vpandn\0" "vpandnd\0" "vpandnq\0" "vpandq\0" "vpavgb\0"
+ "vpavgw\0" "vpblendd\0" "vpblendvb\0" "vpblendw\0" "vpbroadcastb\0" "vpbroadcastd\0" "vpbroadcastmb2d\0"
+ "vpbroadcastmb2q\0" "vpbroadcastq\0" "vpbroadcastw\0" "vpclmulqdq\0" "vpcmov\0" "vpcmpb\0" "vpcmpd\0" "vpcmpeqb\0"
+ "vpcmpeqd\0" "vpcmpeqq\0" "vpcmpeqw\0" "vpcmpestri\0" "vpcmpestrm\0" "vpcmpgtb\0" "vpcmpgtd\0" "vpcmpgtq\0"
+ "vpcmpgtw\0" "vpcmpistri\0" "vpcmpistrm\0" "vpcmpq\0" "vpcmpub\0" "vpcmpud\0" "vpcmpuq\0" "vpcmpuw\0" "vpcmpw\0"
+ "vpcomb\0" "vpcomd\0" "vpcompressb\0" "vpcompressd\0" "vpcompressq\0" "vpcompressw\0" "vpcomq\0" "vpcomub\0"
+ "vpcomud\0" "vpcomuq\0" "vpcomuw\0" "vpcomw\0" "vpconflictd\0" "vpconflictq\0" "vpdpbusd\0" "vpdpbusds\0"
+ "vpdpwssd\0" "vpdpwssds\0" "vperm2f128\0" "vperm2i128\0" "vpermb\0" "vpermd\0" "vpermi2b\0" "vpermi2d\0"
+ "vpermi2pd\0" "vpermi2ps\0" "vpermi2q\0" "vpermi2w\0" "vpermil2pd\0" "vpermil2ps\0" "vpermilpd\0" "vpermilps\0"
+ "vpermpd\0" "vpermps\0" "vpermq\0" "vpermt2b\0" "vpermt2d\0" "vpermt2pd\0" "vpermt2ps\0" "vpermt2q\0" "vpermt2w\0"
+ "vpermw\0" "vpexpandb\0" "vpexpandd\0" "vpexpandq\0" "vpexpandw\0" "vpextrb\0" "vpextrd\0" "vpextrq\0" "vpextrw\0"
+ "vpgatherdd\0" "vpgatherdq\0" "vpgatherqd\0" "vpgatherqq\0" "vphaddbd\0" "vphaddbq\0" "vphaddbw\0" "vphaddd\0"
+ "vphadddq\0" "vphaddsw\0" "vphaddubd\0" "vphaddubq\0" "vphaddubw\0" "vphaddudq\0" "vphadduwd\0" "vphadduwq\0"
+ "vphaddw\0" "vphaddwd\0" "vphaddwq\0" "vphminposuw\0" "vphsubbw\0" "vphsubd\0" "vphsubdq\0" "vphsubsw\0" "vphsubw\0"
+ "vphsubwd\0" "vpinsrb\0" "vpinsrd\0" "vpinsrq\0" "vpinsrw\0" "vplzcntd\0" "vplzcntq\0" "vpmacsdd\0" "vpmacsdqh\0"
+ "vpmacsdql\0" "vpmacssdd\0" "vpmacssdqh\0" "vpmacssdql\0" "vpmacsswd\0" "vpmacssww\0" "vpmacswd\0" "vpmacsww\0"
+ "vpmadcsswd\0" "vpmadcswd\0" "vpmadd52huq\0" "vpmadd52luq\0" "vpmaddubsw\0" "vpmaddwd\0" "vpmaskmovd\0"
+ "vpmaskmovq\0" "vpmaxsb\0" "vpmaxsd\0" "vpmaxsq\0" "vpmaxsw\0" "vpmaxub\0" "vpmaxud\0" "vpmaxuq\0" "vpmaxuw\0"
+ "vpminsb\0" "vpminsd\0" "vpminsq\0" "vpminsw\0" "vpminub\0" "vpminud\0" "vpminuq\0" "vpminuw\0" "vpmovb2m\0"
+ "vpmovd2m\0" "vpmovdb\0" "vpmovdw\0" "vpmovm2b\0" "vpmovm2d\0" "vpmovm2q\0" "vpmovm2w\0" "vpmovmskb\0" "vpmovq2m\0"
+ "vpmovqb\0" "vpmovqd\0" "vpmovqw\0" "vpmovsdb\0" "vpmovsdw\0" "vpmovsqb\0" "vpmovsqd\0" "vpmovsqw\0" "vpmovswb\0"
+ "vpmovsxbd\0" "vpmovsxbq\0" "vpmovsxbw\0" "vpmovsxdq\0" "vpmovsxwd\0" "vpmovsxwq\0" "vpmovusdb\0" "vpmovusdw\0"
+ "vpmovusqb\0" "vpmovusqd\0" "vpmovusqw\0" "vpmovuswb\0" "vpmovw2m\0" "vpmovwb\0" "vpmovzxbd\0" "vpmovzxbq\0"
+ "vpmovzxbw\0" "vpmovzxdq\0" "vpmovzxwd\0" "vpmovzxwq\0" "vpmuldq\0" "vpmulhrsw\0" "vpmulhuw\0" "vpmulhw\0"
+ "vpmulld\0" "vpmullq\0" "vpmullw\0" "vpmultishiftqb\0" "vpmuludq\0" "vpopcntb\0" "vpopcntd\0" "vpopcntq\0"
+ "vpopcntw\0" "vpor\0" "vpord\0" "vporq\0" "vpperm\0" "vprold\0" "vprolq\0" "vprolvd\0" "vprolvq\0" "vprord\0"
+ "vprorq\0" "vprorvd\0" "vprorvq\0" "vprotb\0" "vprotd\0" "vprotq\0" "vprotw\0" "vpsadbw\0" "vpscatterdd\0"
+ "vpscatterdq\0" "vpscatterqd\0" "vpscatterqq\0" "vpshab\0" "vpshad\0" "vpshaq\0" "vpshaw\0" "vpshlb\0" "vpshld\0"
+ "vpshldd\0" "vpshldq\0" "vpshldvd\0" "vpshldvq\0" "vpshldvw\0" "vpshldw\0" "vpshlq\0" "vpshlw\0" "vpshrdd\0"
+ "vpshrdq\0" "vpshrdvd\0" "vpshrdvq\0" "vpshrdvw\0" "vpshrdw\0" "vpshufb\0" "vpshufbitqmb\0" "vpshufd\0" "vpshufhw\0"
+ "vpshuflw\0" "vpsignb\0" "vpsignd\0" "vpsignw\0" "vpslld\0" "vpslldq\0" "vpsllq\0" "vpsllvd\0" "vpsllvq\0"
+ "vpsllvw\0" "vpsllw\0" "vpsrad\0" "vpsraq\0" "vpsravd\0" "vpsravq\0" "vpsravw\0" "vpsraw\0" "vpsrld\0" "vpsrldq\0"
+ "vpsrlq\0" "vpsrlvd\0" "vpsrlvq\0" "vpsrlvw\0" "vpsrlw\0" "vpsubb\0" "vpsubd\0" "vpsubq\0" "vpsubsb\0" "vpsubsw\0"
+ "vpsubusb\0" "vpsubusw\0" "vpsubw\0" "vpternlogd\0" "vpternlogq\0" "vptest\0" "vptestmb\0" "vptestmd\0" "vptestmq\0"
+ "vptestmw\0" "vptestnmb\0" "vptestnmd\0" "vptestnmq\0" "vptestnmw\0" "vpunpckhbw\0" "vpunpckhdq\0" "vpunpckhqdq\0"
+ "vpunpckhwd\0" "vpunpcklbw\0" "vpunpckldq\0" "vpunpcklqdq\0" "vpunpcklwd\0" "vpxor\0" "vpxord\0" "vpxorq\0"
+ "vrangepd\0" "vrangeps\0" "vrangesd\0" "vrangess\0" "vrcp14pd\0" "vrcp14ps\0" "vrcp14sd\0" "vrcp14ss\0" "vrcp28pd\0"
+ "vrcp28ps\0" "vrcp28sd\0" "vrcp28ss\0" "vrcpps\0" "vrcpss\0" "vreducepd\0" "vreduceps\0" "vreducesd\0" "vreducess\0"
+ "vrndscalepd\0" "vrndscaleps\0" "vrndscalesd\0" "vrndscaless\0" "vroundpd\0" "vroundps\0" "vroundsd\0" "vroundss\0"
+ "vrsqrt14pd\0" "vrsqrt14ps\0" "vrsqrt14sd\0" "vrsqrt14ss\0" "vrsqrt28pd\0" "vrsqrt28ps\0" "vrsqrt28sd\0"
+ "vrsqrt28ss\0" "vrsqrtps\0" "vrsqrtss\0" "vscalefpd\0" "vscalefps\0" "vscalefsd\0" "vscalefss\0" "vscatterdpd\0"
+ "vscatterdps\0" "vscatterpf0dpd\0" "vscatterpf0dps\0" "vscatterpf0qpd\0" "vscatterpf0qps\0" "vscatterpf1dpd\0"
+ "vscatterpf1dps\0" "vscatterpf1qpd\0" "vscatterpf1qps\0" "vscatterqpd\0" "vscatterqps\0" "vshuff32x4\0"
+ "vshuff64x2\0" "vshufi32x4\0" "vshufi64x2\0" "vshufpd\0" "vshufps\0" "vsqrtpd\0" "vsqrtps\0" "vsqrtsd\0" "vsqrtss\0"
+ "vstmxcsr\0" "vsubpd\0" "vsubps\0" "vsubsd\0" "vsubss\0" "vtestpd\0" "vtestps\0" "vucomisd\0" "vucomiss\0"
+ "vunpckhpd\0" "vunpckhps\0" "vunpcklpd\0" "vunpcklps\0" "vxorpd\0" "vxorps\0" "vzeroall\0" "vzeroupper\0" "wbinvd\0"
+ "wbnoinvd\0" "wrfsbase\0" "wrgsbase\0" "wrmsr\0" "xabort\0" "xadd\0" "xbegin\0" "xend\0" "xgetbv\0" "xlatb\0"
+ "xrstors\0" "xrstors64\0" "xsavec\0" "xsavec64\0" "xsaveopt\0" "xsaveopt64\0" "xsaves\0" "xsaves64\0" "xsetbv\0"
+ "xtest";
+
+const InstDB::InstNameIndex InstDB::instNameIndex[26] = {
+ { Inst::kIdAaa , Inst::kIdArpl + 1 },
+ { Inst::kIdBextr , Inst::kIdBzhi + 1 },
+ { Inst::kIdCall , Inst::kIdCwde + 1 },
+ { Inst::kIdDaa , Inst::kIdDpps + 1 },
+ { Inst::kIdEmms , Inst::kIdExtrq + 1 },
+ { Inst::kIdF2xm1 , Inst::kIdFyl2xp1 + 1 },
+ { Inst::kIdGetsec , Inst::kIdGf2p8mulb + 1 },
+ { Inst::kIdHaddpd , Inst::kIdHsubps + 1 },
+ { Inst::kIdIdiv , Inst::kIdIretw + 1 },
+ { Inst::kIdJa , Inst::kIdJz + 1 },
+ { Inst::kIdKaddb , Inst::kIdKxorw + 1 },
+ { Inst::kIdLahf , Inst::kIdLzcnt + 1 },
+ { Inst::kIdMaskmovdqu , Inst::kIdMwaitx + 1 },
+ { Inst::kIdNeg , Inst::kIdNot + 1 },
+ { Inst::kIdOr , Inst::kIdOuts + 1 },
+ { Inst::kIdPabsb , Inst::kIdPxor + 1 },
+ { Inst::kIdNone , Inst::kIdNone + 1 },
+ { Inst::kIdRcl , Inst::kIdRsqrtss + 1 },
+ { Inst::kIdSahf , Inst::kIdSysret64 + 1 },
+ { Inst::kIdT1mskc , Inst::kIdTzmsk + 1 },
+ { Inst::kIdUcomisd , Inst::kIdUnpcklps + 1 },
+ { Inst::kIdV4fmaddps , Inst::kIdVzeroupper + 1 },
+ { Inst::kIdWbinvd , Inst::kIdWrmsr + 1 },
+ { Inst::kIdXabort , Inst::kIdXtest + 1 },
+ { Inst::kIdNone , Inst::kIdNone + 1 },
+ { Inst::kIdNone , Inst::kIdNone + 1 }
+};
+// ----------------------------------------------------------------------------
+// ${NameData:End}
+#endif // !ASMJIT_NO_TEXT
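+
+// NOTE (editor): `_nameData` is one NUL-separated string of all mnemonics,
+// and `instNameIndex` buckets the alphabetically ordered instruction IDs by
+// first letter ('a'..'z', 26 entries; letters with no instructions, such as
+// 'q', map to kIdNone). A name lookup therefore only scans one bucket,
+// roughly as below (field and helper names are illustrative, not asmjit's
+// exact API):
+//
+//   uint32_t bucket = uint32_t(name[0] - 'a');          // assume 'a'..'z'
+//   for (uint32_t id = instNameIndex[bucket].start;     // assumed fields
+//        id < instNameIndex[bucket].end; id++)
+//     if (nameMatches(id, name)) return id;             // hypothetical helper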
+
+// ============================================================================
+// [asmjit::x86::InstDB - InstSignature / OpSignature]
+// ============================================================================
+
+#ifndef ASMJIT_NO_VALIDATION
+// ${InstSignatureTable:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+#define ROW(count, x86, x64, implicit, o0, o1, o2, o3, o4, o5) \
+ { count, (x86 ? uint8_t(InstDB::kModeX86) : uint8_t(0)) | \
+ (x64 ? uint8_t(InstDB::kModeX64) : uint8_t(0)) , \
+ implicit, \
+ 0, \
+ { o0, o1, o2, o3, o4, o5 } \
+ }
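+// NOTE (editor): every ROW describes one operand pattern accepted by an
+// instruction: operand count, encodability in 32-bit and/or 64-bit mode, the
+// number of implicit operands (the <ax>/<edx>-style entries in the row
+// comments), and up to six indices into the operand-signature table that
+// follows in this section. The (index, count) pairs stored in
+// `_commonInfoTable` above select a contiguous run of these rows, and the
+// validator accepts an instruction if any row in that run matches the
+// operands actually passed.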
+const InstDB::InstSignature InstDB::_instSignatureTable[] = {
+ ROW(2, 1, 1, 0, 1 , 2 , 0 , 0 , 0 , 0 ), // #0 {r8lo|r8hi|m8|mem, r8lo|r8hi}
+ ROW(2, 1, 1, 0, 3 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem|sreg, r16}
+ ROW(2, 1, 1, 0, 5 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem|sreg, r32}
+ ROW(2, 0, 1, 0, 7 , 8 , 0 , 0 , 0 , 0 ), // {r64|m64|mem|sreg|creg|dreg, r64}
+ ROW(2, 1, 1, 0, 9 , 10 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi|m8, i8|u8}
+ ROW(2, 1, 1, 0, 11 , 12 , 0 , 0 , 0 , 0 ), // {r16|m16, i16|u16}
+ ROW(2, 1, 1, 0, 13 , 14 , 0 , 0 , 0 , 0 ), // {r32|m32, i32|u32}
+ ROW(2, 0, 1, 0, 15 , 16 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, i32}
+ ROW(2, 0, 1, 0, 8 , 17 , 0 , 0 , 0 , 0 ), // {r64, i64|u64|m64|mem|sreg|creg|dreg}
+ ROW(2, 1, 1, 0, 2 , 18 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi, m8|mem}
+ ROW(2, 1, 1, 0, 4 , 19 , 0 , 0 , 0 , 0 ), // {r16, m16|mem|sreg}
+ ROW(2, 1, 1, 0, 6 , 20 , 0 , 0 , 0 , 0 ), // {r32, m32|mem|sreg}
+ ROW(2, 1, 1, 0, 21 , 22 , 0 , 0 , 0 , 0 ), // {m16|mem, sreg}
+ ROW(2, 1, 1, 0, 22 , 21 , 0 , 0 , 0 , 0 ), // {sreg, m16|mem}
+ ROW(2, 1, 0, 0, 6 , 23 , 0 , 0 , 0 , 0 ), // {r32, creg|dreg}
+ ROW(2, 1, 0, 0, 23 , 6 , 0 , 0 , 0 , 0 ), // {creg|dreg, r32}
+ ROW(2, 1, 1, 0, 9 , 10 , 0 , 0 , 0 , 0 ), // #16 {r8lo|r8hi|m8, i8|u8}
+ ROW(2, 1, 1, 0, 11 , 12 , 0 , 0 , 0 , 0 ), // {r16|m16, i16|u16}
+ ROW(2, 1, 1, 0, 13 , 14 , 0 , 0 , 0 , 0 ), // {r32|m32, i32|u32}
+ ROW(2, 0, 1, 0, 15 , 24 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, i32|r64}
+ ROW(2, 1, 1, 0, 25 , 26 , 0 , 0 , 0 , 0 ), // {r16|m16|r32|m32|r64|m64|mem, i8}
+ ROW(2, 1, 1, 0, 1 , 2 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi|m8|mem, r8lo|r8hi}
+ ROW(2, 1, 1, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem, r16}
+ ROW(2, 1, 1, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, r32}
+ ROW(2, 1, 1, 0, 2 , 18 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi, m8|mem}
+ ROW(2, 1, 1, 0, 4 , 21 , 0 , 0 , 0 , 0 ), // {r16, m16|mem}
+ ROW(2, 1, 1, 0, 6 , 29 , 0 , 0 , 0 , 0 ), // {r32, m32|mem}
+ ROW(2, 0, 1, 0, 8 , 30 , 0 , 0 , 0 , 0 ), // {r64, m64|mem}
+ ROW(2, 1, 1, 0, 31 , 10 , 0 , 0 , 0 , 0 ), // #28 {r8lo|r8hi|m8|r16|m16|r32|m32|r64|m64|mem, i8|u8}
+ ROW(2, 1, 1, 0, 11 , 12 , 0 , 0 , 0 , 0 ), // {r16|m16, i16|u16}
+ ROW(2, 1, 1, 0, 13 , 14 , 0 , 0 , 0 , 0 ), // {r32|m32, i32|u32}
+ ROW(2, 0, 1, 0, 8 , 32 , 0 , 0 , 0 , 0 ), // {r64, u32|i32|r64|m64|mem}
+ ROW(2, 0, 1, 0, 30 , 24 , 0 , 0 , 0 , 0 ), // {m64|mem, i32|r64}
+ ROW(2, 1, 1, 0, 1 , 2 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi|m8|mem, r8lo|r8hi}
+ ROW(2, 1, 1, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem, r16}
+ ROW(2, 1, 1, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, r32}
+ ROW(2, 1, 1, 0, 2 , 18 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi, m8|mem}
+ ROW(2, 1, 1, 0, 4 , 21 , 0 , 0 , 0 , 0 ), // {r16, m16|mem}
+ ROW(2, 1, 1, 0, 6 , 29 , 0 , 0 , 0 , 0 ), // {r32, m32|mem}
+ ROW(2, 1, 1, 1, 33 , 1 , 0 , 0 , 0 , 0 ), // #39 {<ax>, r8lo|r8hi|m8|mem}
+ ROW(3, 1, 1, 2, 34 , 33 , 27 , 0 , 0 , 0 ), // {<dx>, <ax>, r16|m16|mem}
+ ROW(3, 1, 1, 2, 35 , 36 , 28 , 0 , 0 , 0 ), // {<edx>, <eax>, r32|m32|mem}
+ ROW(3, 0, 1, 2, 37 , 38 , 15 , 0 , 0 , 0 ), // {<rdx>, <rax>, r64|m64|mem}
+ ROW(2, 1, 1, 0, 4 , 39 , 0 , 0 , 0 , 0 ), // {r16, r16|m16|mem|i8|i16}
+ ROW(2, 1, 1, 0, 6 , 40 , 0 , 0 , 0 , 0 ), // {r32, r32|m32|mem|i8|i32}
+ ROW(2, 0, 1, 0, 8 , 41 , 0 , 0 , 0 , 0 ), // {r64, r64|m64|mem|i8|i32}
+ ROW(3, 1, 1, 0, 4 , 27 , 42 , 0 , 0 , 0 ), // {r16, r16|m16|mem, i8|i16|u16}
+ ROW(3, 1, 1, 0, 6 , 28 , 43 , 0 , 0 , 0 ), // {r32, r32|m32|mem, i8|i32|u32}
+ ROW(3, 0, 1, 0, 8 , 15 , 44 , 0 , 0 , 0 ), // {r64, r64|m64|mem, i8|i32}
+ ROW(2, 1, 1, 0, 1 , 2 , 0 , 0 , 0 , 0 ), // #49 {r8lo|r8hi|m8|mem, r8lo|r8hi}
+ ROW(2, 1, 1, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem, r16}
+ ROW(2, 1, 1, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, r32}
+ ROW(2, 0, 1, 0, 15 , 8 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, r64}
+ ROW(2, 1, 1, 0, 2 , 18 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi, m8|mem}
+ ROW(2, 1, 1, 0, 4 , 21 , 0 , 0 , 0 , 0 ), // {r16, m16|mem}
+ ROW(2, 1, 1, 0, 6 , 29 , 0 , 0 , 0 , 0 ), // {r32, m32|mem}
+ ROW(2, 0, 1, 0, 8 , 30 , 0 , 0 , 0 , 0 ), // {r64, m64|mem}
+ ROW(2, 1, 1, 0, 9 , 10 , 0 , 0 , 0 , 0 ), // #57 {r8lo|r8hi|m8, i8|u8}
+ ROW(2, 1, 1, 0, 11 , 12 , 0 , 0 , 0 , 0 ), // {r16|m16, i16|u16}
+ ROW(2, 1, 1, 0, 13 , 14 , 0 , 0 , 0 , 0 ), // {r32|m32, i32|u32}
+ ROW(2, 0, 1, 0, 15 , 24 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, i32|r64}
+ ROW(2, 1, 1, 0, 1 , 2 , 0 , 0 , 0 , 0 ), // {r8lo|r8hi|m8|mem, r8lo|r8hi}
+ ROW(2, 1, 1, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem, r16}
+ ROW(2, 1, 1, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, r32}
+ ROW(2, 1, 1, 0, 4 , 21 , 0 , 0 , 0 , 0 ), // #64 {r16, m16|mem}
+ ROW(2, 1, 1, 0, 6 , 29 , 0 , 0 , 0 , 0 ), // {r32, m32|mem}
+ ROW(2, 0, 1, 0, 8 , 30 , 0 , 0 , 0 , 0 ), // {r64, m64|mem}
+ ROW(2, 1, 1, 0, 21 , 4 , 0 , 0 , 0 , 0 ), // {m16|mem, r16}
+ ROW(2, 1, 1, 0, 29 , 6 , 0 , 0 , 0 , 0 ), // #68 {m32|mem, r32}
+ ROW(2, 0, 1, 0, 30 , 8 , 0 , 0 , 0 , 0 ), // {m64|mem, r64}
+ ROW(2, 1, 1, 0, 45 , 46 , 0 , 0 , 0 , 0 ), // #70 {xmm, xmm|m128|mem}
+ ROW(2, 1, 1, 0, 47 , 45 , 0 , 0 , 0 , 0 ), // #71 {m128|mem, xmm}
+ ROW(2, 1, 1, 0, 48 , 49 , 0 , 0 , 0 , 0 ), // {ymm, ymm|m256|mem}
+ ROW(2, 1, 1, 0, 50 , 48 , 0 , 0 , 0 , 0 ), // {m256|mem, ymm}
+ ROW(2, 1, 1, 0, 51 , 52 , 0 , 0 , 0 , 0 ), // #74 {zmm, zmm|m512|mem}
+ ROW(2, 1, 1, 0, 53 , 51 , 0 , 0 , 0 , 0 ), // {m512|mem, zmm}
+ ROW(3, 1, 1, 0, 45 , 45 , 54 , 0 , 0 , 0 ), // #76 {xmm, xmm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 45 , 47 , 10 , 0 , 0 , 0 ), // {xmm, m128|mem, i8|u8}
+ ROW(3, 1, 1, 0, 48 , 48 , 55 , 0 , 0 , 0 ), // {ymm, ymm, ymm|m256|mem|i8|u8}
+ ROW(3, 1, 1, 0, 48 , 50 , 10 , 0 , 0 , 0 ), // {ymm, m256|mem, i8|u8}
+ ROW(3, 1, 1, 0, 51 , 51 , 56 , 0 , 0 , 0 ), // {zmm, zmm, zmm|m512|mem|i8|u8}
+ ROW(3, 1, 1, 0, 51 , 53 , 10 , 0 , 0 , 0 ), // {zmm, m512|mem, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 45 , 54 , 0 , 0 , 0 ), // #82 {xmm, xmm, i8|u8|xmm|m128|mem}
+ ROW(3, 1, 1, 0, 48 , 48 , 54 , 0 , 0 , 0 ), // {ymm, ymm, i8|u8|xmm|m128|mem}
+ ROW(3, 1, 1, 0, 45 , 47 , 10 , 0 , 0 , 0 ), // {xmm, m128|mem, i8|u8}
+ ROW(3, 1, 1, 0, 48 , 50 , 10 , 0 , 0 , 0 ), // {ymm, m256|mem, i8|u8}
+ ROW(3, 1, 1, 0, 51 , 51 , 54 , 0 , 0 , 0 ), // {zmm, zmm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 51 , 53 , 10 , 0 , 0 , 0 ), // {zmm, m512|mem, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 45 , 54 , 0 , 0 , 0 ), // #88 {xmm, xmm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 45 , 47 , 10 , 0 , 0 , 0 ), // {xmm, m128|mem, i8|u8}
+ ROW(3, 1, 1, 0, 48 , 48 , 54 , 0 , 0 , 0 ), // {ymm, ymm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 48 , 50 , 10 , 0 , 0 , 0 ), // {ymm, m256|mem, i8|u8}
+ ROW(3, 1, 1, 0, 51 , 51 , 54 , 0 , 0 , 0 ), // {zmm, zmm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 51 , 53 , 10 , 0 , 0 , 0 ), // {zmm, m512|mem, i8|u8}
+ ROW(2, 1, 1, 0, 57 , 58 , 0 , 0 , 0 , 0 ), // #94 {mm, mm|m64|mem|r64}
+ ROW(2, 1, 1, 0, 15 , 59 , 0 , 0 , 0 , 0 ), // {m64|mem|r64, mm|xmm}
+ ROW(2, 0, 1, 0, 45 , 15 , 0 , 0 , 0 , 0 ), // {xmm, r64|m64|mem}
+ ROW(2, 1, 1, 0, 45 , 60 , 0 , 0 , 0 , 0 ), // #97 {xmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 30 , 45 , 0 , 0 , 0 , 0 ), // #98 {m64|mem, xmm}
+ ROW(3, 1, 1, 0, 45 , 61 , 45 , 0 , 0 , 0 ), // #99 {xmm, vm32x, xmm}
+ ROW(3, 1, 1, 0, 48 , 61 , 48 , 0 , 0 , 0 ), // {ymm, vm32x, ymm}
+ ROW(2, 1, 1, 0, 45 , 61 , 0 , 0 , 0 , 0 ), // {xmm, vm32x}
+ ROW(2, 1, 1, 0, 48 , 62 , 0 , 0 , 0 , 0 ), // {ymm, vm32y}
+ ROW(2, 1, 1, 0, 51 , 63 , 0 , 0 , 0 , 0 ), // {zmm, vm32z}
+ ROW(3, 1, 1, 0, 45 , 61 , 45 , 0 , 0 , 0 ), // #104 {xmm, vm32x, xmm}
+ ROW(3, 1, 1, 0, 48 , 62 , 48 , 0 , 0 , 0 ), // {ymm, vm32y, ymm}
+ ROW(2, 1, 1, 0, 45 , 61 , 0 , 0 , 0 , 0 ), // {xmm, vm32x}
+ ROW(2, 1, 1, 0, 48 , 62 , 0 , 0 , 0 , 0 ), // {ymm, vm32y}
+ ROW(2, 1, 1, 0, 51 , 63 , 0 , 0 , 0 , 0 ), // {zmm, vm32z}
+ ROW(3, 1, 1, 0, 45 , 64 , 45 , 0 , 0 , 0 ), // #109 {xmm, vm64x, xmm}
+ ROW(3, 1, 1, 0, 48 , 65 , 48 , 0 , 0 , 0 ), // {ymm, vm64y, ymm}
+ ROW(2, 1, 1, 0, 45 , 64 , 0 , 0 , 0 , 0 ), // {xmm, vm64x}
+ ROW(2, 1, 1, 0, 48 , 65 , 0 , 0 , 0 , 0 ), // {ymm, vm64y}
+ ROW(2, 1, 1, 0, 51 , 66 , 0 , 0 , 0 , 0 ), // {zmm, vm64z}
+ ROW(2, 1, 1, 0, 25 , 10 , 0 , 0 , 0 , 0 ), // #114 {r16|m16|r32|m32|r64|m64|mem, i8|u8}
+ ROW(2, 1, 1, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // {r16|m16|mem, r16}
+ ROW(2, 1, 1, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, r32}
+ ROW(2, 0, 1, 0, 15 , 8 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, r64}
+ ROW(3, 1, 1, 1, 1 , 2 , 67 , 0 , 0 , 0 ), // #118 {r8lo|r8hi|m8|mem, r8lo|r8hi, <al>}
+ ROW(3, 1, 1, 1, 27 , 4 , 33 , 0 , 0 , 0 ), // {r16|m16|mem, r16, <ax>}
+ ROW(3, 1, 1, 1, 28 , 6 , 36 , 0 , 0 , 0 ), // {r32|m32|mem, r32, <eax>}
+ ROW(3, 0, 1, 1, 15 , 8 , 38 , 0 , 0 , 0 ), // {r64|m64|mem, r64, <rax>}
+ ROW(1, 1, 1, 0, 68 , 0 , 0 , 0 , 0 , 0 ), // #122 {r16|m16|r64|m64|mem}
+ ROW(1, 1, 0, 0, 13 , 0 , 0 , 0 , 0 , 0 ), // {r32|m32}
+ ROW(1, 1, 0, 0, 69 , 0 , 0 , 0 , 0 , 0 ), // {ds|es|ss}
+ ROW(1, 1, 1, 0, 70 , 0 , 0 , 0 , 0 , 0 ), // {fs|gs}
+ ROW(1, 1, 1, 0, 71 , 0 , 0 , 0 , 0 , 0 ), // #126 {r16|m16|r64|m64|mem|i8|i16|i32}
+ ROW(1, 1, 0, 0, 72 , 0 , 0 , 0 , 0 , 0 ), // {r32|m32|i32|u32}
+ ROW(1, 1, 0, 0, 73 , 0 , 0 , 0 , 0 , 0 ), // {cs|ss|ds|es}
+ ROW(1, 1, 1, 0, 70 , 0 , 0 , 0 , 0 , 0 ), // {fs|gs}
+ ROW(4, 1, 1, 0, 45 , 45 , 45 , 46 , 0 , 0 ), // #130 {xmm, xmm, xmm, xmm|m128|mem}
+ ROW(4, 1, 1, 0, 45 , 45 , 47 , 45 , 0 , 0 ), // {xmm, xmm, m128|mem, xmm}
+ ROW(4, 1, 1, 0, 48 , 48 , 48 , 49 , 0 , 0 ), // {ymm, ymm, ymm, ymm|m256|mem}
+ ROW(4, 1, 1, 0, 48 , 48 , 50 , 48 , 0 , 0 ), // {ymm, ymm, m256|mem, ymm}
+ ROW(3, 1, 1, 0, 45 , 74 , 45 , 0 , 0 , 0 ), // #134 {xmm, vm64x|vm64y, xmm}
+ ROW(2, 1, 1, 0, 45 , 64 , 0 , 0 , 0 , 0 ), // {xmm, vm64x}
+ ROW(2, 1, 1, 0, 48 , 65 , 0 , 0 , 0 , 0 ), // {ymm, vm64y}
+ ROW(2, 1, 1, 0, 51 , 66 , 0 , 0 , 0 , 0 ), // {zmm, vm64z}
+ ROW(3, 1, 1, 0, 47 , 45 , 45 , 0 , 0 , 0 ), // #138 {m128|mem, xmm, xmm}
+ ROW(3, 1, 1, 0, 50 , 48 , 48 , 0 , 0 , 0 ), // {m256|mem, ymm, ymm}
+ ROW(3, 1, 1, 0, 45 , 45 , 47 , 0 , 0 , 0 ), // {xmm, xmm, m128|mem}
+ ROW(3, 1, 1, 0, 48 , 48 , 50 , 0 , 0 , 0 ), // {ymm, ymm, m256|mem}
+ ROW(5, 1, 1, 0, 45 , 45 , 46 , 45 , 75 , 0 ), // #142 {xmm, xmm, xmm|m128|mem, xmm, i4|u4}
+ ROW(5, 1, 1, 0, 45 , 45 , 45 , 47 , 75 , 0 ), // {xmm, xmm, xmm, m128|mem, i4|u4}
+ ROW(5, 1, 1, 0, 48 , 48 , 49 , 48 , 75 , 0 ), // {ymm, ymm, ymm|m256|mem, ymm, i4|u4}
+ ROW(5, 1, 1, 0, 48 , 48 , 48 , 50 , 75 , 0 ), // {ymm, ymm, ymm, m256|mem, i4|u4}
+ ROW(3, 1, 1, 0, 48 , 49 , 10 , 0 , 0 , 0 ), // #146 {ymm, ymm|m256|mem, i8|u8}
+ ROW(3, 1, 1, 0, 48 , 48 , 49 , 0 , 0 , 0 ), // {ymm, ymm, ymm|m256|mem}
+ ROW(3, 1, 1, 0, 51 , 51 , 56 , 0 , 0 , 0 ), // {zmm, zmm, zmm|m512|mem|i8|u8}
+ ROW(3, 1, 1, 0, 51 , 53 , 10 , 0 , 0 , 0 ), // {zmm, m512|mem, i8|u8}
+ ROW(2, 1, 1, 0, 4 , 27 , 0 , 0 , 0 , 0 ), // #150 {r16, r16|m16|mem}
+ ROW(2, 1, 1, 0, 6 , 28 , 0 , 0 , 0 , 0 ), // #151 {r32, r32|m32|mem}
+ ROW(2, 0, 1, 0, 8 , 15 , 0 , 0 , 0 , 0 ), // {r64, r64|m64|mem}
+ ROW(1, 1, 1, 0, 76 , 0 , 0 , 0 , 0 , 0 ), // #153 {m32|m64}
+ ROW(2, 1, 1, 0, 77 , 78 , 0 , 0 , 0 , 0 ), // {st0, st}
+ ROW(2, 1, 1, 0, 78 , 77 , 0 , 0 , 0 , 0 ), // {st, st0}
+ ROW(2, 1, 1, 0, 4 , 29 , 0 , 0 , 0 , 0 ), // #156 {r16, m32|mem}
+ ROW(2, 1, 1, 0, 6 , 79 , 0 , 0 , 0 , 0 ), // {r32, m48|mem}
+ ROW(2, 0, 1, 0, 8 , 80 , 0 , 0 , 0 , 0 ), // {r64, m80|mem}
+ ROW(3, 1, 1, 0, 27 , 4 , 81 , 0 , 0 , 0 ), // #159 {r16|m16|mem, r16, cl|i8|u8}
+ ROW(3, 1, 1, 0, 28 , 6 , 81 , 0 , 0 , 0 ), // {r32|m32|mem, r32, cl|i8|u8}
+ ROW(3, 0, 1, 0, 15 , 8 , 81 , 0 , 0 , 0 ), // {r64|m64|mem, r64, cl|i8|u8}
+ ROW(3, 1, 1, 0, 45 , 45 , 46 , 0 , 0 , 0 ), // #162 {xmm, xmm, xmm|m128|mem}
+ ROW(3, 1, 1, 0, 48 , 48 , 49 , 0 , 0 , 0 ), // #163 {ymm, ymm, ymm|m256|mem}
+ ROW(3, 1, 1, 0, 51 , 51 , 52 , 0 , 0 , 0 ), // {zmm, zmm, zmm|m512|mem}
+ ROW(4, 1, 1, 0, 45 , 45 , 46 , 10 , 0 , 0 ), // #165 {xmm, xmm, xmm|m128|mem, i8|u8}
+ ROW(4, 1, 1, 0, 48 , 48 , 49 , 10 , 0 , 0 ), // #166 {ymm, ymm, ymm|m256|mem, i8|u8}
+ ROW(4, 1, 1, 0, 51 , 51 , 52 , 10 , 0 , 0 ), // {zmm, zmm, zmm|m512|mem, i8|u8}
+ ROW(4, 1, 1, 0, 82 , 45 , 46 , 10 , 0 , 0 ), // #168 {xmm|k, xmm, xmm|m128|mem, i8|u8}
+ ROW(4, 1, 1, 0, 83 , 48 , 49 , 10 , 0 , 0 ), // {ymm|k, ymm, ymm|m256|mem, i8|u8}
+ ROW(4, 1, 1, 0, 84 , 51 , 52 , 10 , 0 , 0 ), // {k, zmm, zmm|m512|mem, i8|u8}
+ ROW(2, 1, 1, 0, 46 , 45 , 0 , 0 , 0 , 0 ), // #171 {xmm|m128|mem, xmm}
+ ROW(2, 1, 1, 0, 49 , 48 , 0 , 0 , 0 , 0 ), // {ymm|m256|mem, ymm}
+ ROW(2, 1, 1, 0, 52 , 51 , 0 , 0 , 0 , 0 ), // {zmm|m512|mem, zmm}
+ ROW(2, 1, 1, 0, 45 , 60 , 0 , 0 , 0 , 0 ), // #174 {xmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 48 , 46 , 0 , 0 , 0 , 0 ), // {ymm, xmm|m128|mem}
+ ROW(2, 1, 1, 0, 51 , 49 , 0 , 0 , 0 , 0 ), // {zmm, ymm|m256|mem}
+ ROW(2, 1, 1, 0, 45 , 46 , 0 , 0 , 0 , 0 ), // #177 {xmm, xmm|m128|mem}
+ ROW(2, 1, 1, 0, 48 , 49 , 0 , 0 , 0 , 0 ), // {ymm, ymm|m256|mem}
+ ROW(2, 1, 1, 0, 51 , 52 , 0 , 0 , 0 , 0 ), // {zmm, zmm|m512|mem}
+ ROW(3, 1, 1, 0, 60 , 45 , 10 , 0 , 0 , 0 ), // #180 {xmm|m64|mem, xmm, i8|u8}
+ ROW(3, 1, 1, 0, 46 , 48 , 10 , 0 , 0 , 0 ), // #181 {xmm|m128|mem, ymm, i8|u8}
+ ROW(3, 1, 1, 0, 49 , 51 , 10 , 0 , 0 , 0 ), // #182 {ymm|m256|mem, zmm, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 46 , 10 , 0 , 0 , 0 ), // #183 {xmm, xmm|m128|mem, i8|u8}
+ ROW(3, 1, 1, 0, 48 , 49 , 10 , 0 , 0 , 0 ), // {ymm, ymm|m256|mem, i8|u8}
+ ROW(3, 1, 1, 0, 51 , 52 , 10 , 0 , 0 , 0 ), // {zmm, zmm|m512|mem, i8|u8}
+ ROW(2, 1, 1, 0, 45 , 60 , 0 , 0 , 0 , 0 ), // #186 {xmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 48 , 49 , 0 , 0 , 0 , 0 ), // {ymm, ymm|m256|mem}
+ ROW(2, 1, 1, 0, 51 , 52 , 0 , 0 , 0 , 0 ), // {zmm, zmm|m512|mem}
+ ROW(2, 1, 1, 0, 47 , 45 , 0 , 0 , 0 , 0 ), // #189 {m128|mem, xmm}
+ ROW(2, 1, 1, 0, 50 , 48 , 0 , 0 , 0 , 0 ), // {m256|mem, ymm}
+ ROW(2, 1, 1, 0, 53 , 51 , 0 , 0 , 0 , 0 ), // {m512|mem, zmm}
+ ROW(2, 1, 1, 0, 45 , 47 , 0 , 0 , 0 , 0 ), // #192 {xmm, m128|mem}
+ ROW(2, 1, 1, 0, 48 , 50 , 0 , 0 , 0 , 0 ), // {ymm, m256|mem}
+ ROW(2, 1, 1, 0, 51 , 53 , 0 , 0 , 0 , 0 ), // {zmm, m512|mem}
+ ROW(2, 0, 1, 0, 15 , 45 , 0 , 0 , 0 , 0 ), // #195 {r64|m64|mem, xmm}
+ ROW(2, 1, 1, 0, 45 , 85 , 0 , 0 , 0 , 0 ), // {xmm, xmm|m64|mem|r64}
+ ROW(2, 1, 1, 0, 30 , 45 , 0 , 0 , 0 , 0 ), // {m64|mem, xmm}
+ ROW(2, 1, 1, 0, 30 , 45 , 0 , 0 , 0 , 0 ), // #198 {m64|mem, xmm}
+ ROW(2, 1, 1, 0, 45 , 30 , 0 , 0 , 0 , 0 ), // {xmm, m64|mem}
+ ROW(3, 1, 1, 0, 45 , 45 , 45 , 0 , 0 , 0 ), // #200 {xmm, xmm, xmm}
+ ROW(2, 1, 1, 0, 29 , 45 , 0 , 0 , 0 , 0 ), // #201 {m32|mem, xmm}
+ ROW(2, 1, 1, 0, 45 , 29 , 0 , 0 , 0 , 0 ), // {xmm, m32|mem}
+ ROW(3, 1, 1, 0, 45 , 45 , 45 , 0 , 0 , 0 ), // {xmm, xmm, xmm}
+ ROW(2, 1, 1, 0, 86 , 85 , 0 , 0 , 0 , 0 ), // #204 {xmm|ymm, xmm|m64|mem|r64}
+ ROW(2, 0, 1, 0, 51 , 8 , 0 , 0 , 0 , 0 ), // {zmm, r64}
+ ROW(2, 1, 1, 0, 51 , 60 , 0 , 0 , 0 , 0 ), // {zmm, xmm|m64|mem}
+ ROW(4, 1, 1, 0, 84 , 45 , 46 , 10 , 0 , 0 ), // #207 {k, xmm, xmm|m128|mem, i8|u8}
+ ROW(4, 1, 1, 0, 84 , 48 , 49 , 10 , 0 , 0 ), // {k, ymm, ymm|m256|mem, i8|u8}
+ ROW(4, 1, 1, 0, 84 , 51 , 52 , 10 , 0 , 0 ), // {k, zmm, zmm|m512|mem, i8|u8}
+ ROW(3, 1, 1, 0, 82 , 45 , 46 , 0 , 0 , 0 ), // #210 {xmm|k, xmm, xmm|m128|mem}
+ ROW(3, 1, 1, 0, 83 , 48 , 49 , 0 , 0 , 0 ), // {ymm|k, ymm, ymm|m256|mem}
+ ROW(3, 1, 1, 0, 84 , 51 , 52 , 0 , 0 , 0 ), // {k, zmm, zmm|m512|mem}
+ ROW(2, 1, 1, 0, 87 , 45 , 0 , 0 , 0 , 0 ), // #213 {xmm|m32|mem, xmm}
+ ROW(2, 1, 1, 0, 60 , 48 , 0 , 0 , 0 , 0 ), // {xmm|m64|mem, ymm}
+ ROW(2, 1, 1, 0, 46 , 51 , 0 , 0 , 0 , 0 ), // {xmm|m128|mem, zmm}
+ ROW(2, 1, 1, 0, 60 , 45 , 0 , 0 , 0 , 0 ), // #216 {xmm|m64|mem, xmm}
+ ROW(2, 1, 1, 0, 46 , 48 , 0 , 0 , 0 , 0 ), // {xmm|m128|mem, ymm}
+ ROW(2, 1, 1, 0, 49 , 51 , 0 , 0 , 0 , 0 ), // {ymm|m256|mem, zmm}
+ ROW(2, 1, 1, 0, 88 , 45 , 0 , 0 , 0 , 0 ), // #219 {xmm|m16|mem, xmm}
+ ROW(2, 1, 1, 0, 87 , 48 , 0 , 0 , 0 , 0 ), // {xmm|m32|mem, ymm}
+ ROW(2, 1, 1, 0, 60 , 51 , 0 , 0 , 0 , 0 ), // {xmm|m64|mem, zmm}
+ ROW(2, 1, 1, 0, 45 , 87 , 0 , 0 , 0 , 0 ), // #222 {xmm, xmm|m32|mem}
+ ROW(2, 1, 1, 0, 48 , 60 , 0 , 0 , 0 , 0 ), // {ymm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 51 , 46 , 0 , 0 , 0 , 0 ), // {zmm, xmm|m128|mem}
+ ROW(2, 1, 1, 0, 45 , 88 , 0 , 0 , 0 , 0 ), // #225 {xmm, xmm|m16|mem}
+ ROW(2, 1, 1, 0, 48 , 87 , 0 , 0 , 0 , 0 ), // {ymm, xmm|m32|mem}
+ ROW(2, 1, 1, 0, 51 , 60 , 0 , 0 , 0 , 0 ), // {zmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 61 , 45 , 0 , 0 , 0 , 0 ), // #228 {vm32x, xmm}
+ ROW(2, 1, 1, 0, 62 , 48 , 0 , 0 , 0 , 0 ), // {vm32y, ymm}
+ ROW(2, 1, 1, 0, 63 , 51 , 0 , 0 , 0 , 0 ), // {vm32z, zmm}
+ ROW(2, 1, 1, 0, 64 , 45 , 0 , 0 , 0 , 0 ), // #231 {vm64x, xmm}
+ ROW(2, 1, 1, 0, 65 , 48 , 0 , 0 , 0 , 0 ), // {vm64y, ymm}
+ ROW(2, 1, 1, 0, 66 , 51 , 0 , 0 , 0 , 0 ), // {vm64z, zmm}
+ ROW(3, 1, 1, 0, 84 , 45 , 46 , 0 , 0 , 0 ), // #234 {k, xmm, xmm|m128|mem}
+ ROW(3, 1, 1, 0, 84 , 48 , 49 , 0 , 0 , 0 ), // {k, ymm, ymm|m256|mem}
+ ROW(3, 1, 1, 0, 84 , 51 , 52 , 0 , 0 , 0 ), // {k, zmm, zmm|m512|mem}
+ ROW(3, 1, 1, 0, 6 , 6 , 28 , 0 , 0 , 0 ), // #237 {r32, r32, r32|m32|mem}
+ ROW(3, 0, 1, 0, 8 , 8 , 15 , 0 , 0 , 0 ), // {r64, r64, r64|m64|mem}
+ ROW(3, 1, 1, 0, 6 , 28 , 6 , 0 , 0 , 0 ), // #239 {r32, r32|m32|mem, r32}
+ ROW(3, 0, 1, 0, 8 , 15 , 8 , 0 , 0 , 0 ), // {r64, r64|m64|mem, r64}
+ ROW(2, 1, 0, 0, 89 , 28 , 0 , 0 , 0 , 0 ), // #241 {bnd, r32|m32|mem}
+ ROW(2, 0, 1, 0, 89 , 15 , 0 , 0 , 0 , 0 ), // {bnd, r64|m64|mem}
+ ROW(2, 1, 1, 0, 89 , 90 , 0 , 0 , 0 , 0 ), // #243 {bnd, bnd|mem}
+ ROW(2, 1, 1, 0, 91 , 89 , 0 , 0 , 0 , 0 ), // {mem, bnd}
+ ROW(2, 1, 0, 0, 4 , 29 , 0 , 0 , 0 , 0 ), // #245 {r16, m32|mem}
+ ROW(2, 1, 0, 0, 6 , 30 , 0 , 0 , 0 , 0 ), // {r32, m64|mem}
+ ROW(1, 1, 0, 0, 92 , 0 , 0 , 0 , 0 , 0 ), // #247 {rel16|r16|m16|r32|m32}
+ ROW(1, 1, 1, 0, 93 , 0 , 0 , 0 , 0 , 0 ), // {rel32|r64|m64|mem}
+ ROW(2, 1, 1, 0, 6 , 94 , 0 , 0 , 0 , 0 ), // #249 {r32, r8lo|r8hi|m8|r16|m16|r32|m32}
+ ROW(2, 0, 1, 0, 8 , 95 , 0 , 0 , 0 , 0 ), // {r64, r8lo|r8hi|m8|r64|m64}
+ ROW(1, 1, 0, 0, 96 , 0 , 0 , 0 , 0 , 0 ), // #251 {r16|r32}
+ ROW(1, 1, 1, 0, 31 , 0 , 0 , 0 , 0 , 0 ), // #252 {r8lo|r8hi|m8|r16|m16|r32|m32|r64|m64|mem}
+ ROW(2, 1, 0, 0, 97 , 53 , 0 , 0 , 0 , 0 ), // #253 {es:[memBase], m512|mem}
+ ROW(2, 0, 1, 0, 97 , 53 , 0 , 0 , 0 , 0 ), // {es:[memBase], m512|mem}
+ ROW(3, 1, 1, 0, 45 , 10 , 10 , 0 , 0 , 0 ), // #255 {xmm, i8|u8, i8|u8}
+ ROW(2, 1, 1, 0, 45 , 45 , 0 , 0 , 0 , 0 ), // #256 {xmm, xmm}
+ ROW(0, 1, 1, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #257 {}
+ ROW(1, 1, 1, 0, 78 , 0 , 0 , 0 , 0 , 0 ), // #258 {st}
+ ROW(0, 1, 1, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #259 {}
+ ROW(1, 1, 1, 0, 98 , 0 , 0 , 0 , 0 , 0 ), // #260 {m32|m64|st}
+ ROW(2, 1, 1, 0, 45 , 45 , 0 , 0 , 0 , 0 ), // #261 {xmm, xmm}
+ ROW(4, 1, 1, 0, 45 , 45 , 10 , 10 , 0 , 0 ), // {xmm, xmm, i8|u8, i8|u8}
+ ROW(2, 1, 0, 0, 6 , 47 , 0 , 0 , 0 , 0 ), // #263 {r32, m128|mem}
+ ROW(2, 0, 1, 0, 8 , 47 , 0 , 0 , 0 , 0 ), // {r64, m128|mem}
+ ROW(2, 1, 0, 2, 36 , 99 , 0 , 0 , 0 , 0 ), // #265 {<eax>, <ecx>}
+ ROW(2, 0, 1, 2, 100, 99 , 0 , 0 , 0 , 0 ), // {<eax|rax>, <ecx>}
+ ROW(1, 1, 1, 0, 101, 0 , 0 , 0 , 0 , 0 ), // #267 {rel8|rel32}
+ ROW(1, 1, 0, 0, 102, 0 , 0 , 0 , 0 , 0 ), // {rel16}
+ ROW(2, 1, 0, 1, 103, 104, 0 , 0 , 0 , 0 ), // #269 {<cx|ecx>, rel8}
+ ROW(2, 0, 1, 1, 105, 104, 0 , 0 , 0 , 0 ), // {<ecx|rcx>, rel8}
+ ROW(1, 1, 1, 0, 106, 0 , 0 , 0 , 0 , 0 ), // #271 {rel8|rel32|r64|m64|mem}
+ ROW(1, 1, 0, 0, 107, 0 , 0 , 0 , 0 , 0 ), // {rel16|r32|m32|mem}
+ ROW(2, 1, 1, 0, 84 , 108, 0 , 0 , 0 , 0 ), // #273 {k, k|m8|mem|r32|r8lo|r8hi|r16}
+ ROW(2, 1, 1, 0, 109, 84 , 0 , 0 , 0 , 0 ), // {m8|mem|r32|r8lo|r8hi|r16, k}
+ ROW(2, 1, 1, 0, 84 , 110, 0 , 0 , 0 , 0 ), // #275 {k, k|m32|mem|r32}
+ ROW(2, 1, 1, 0, 28 , 84 , 0 , 0 , 0 , 0 ), // {m32|mem|r32, k}
+ ROW(2, 1, 1, 0, 84 , 111, 0 , 0 , 0 , 0 ), // #277 {k, k|m64|mem|r64}
+ ROW(2, 1, 1, 0, 15 , 84 , 0 , 0 , 0 , 0 ), // {m64|mem|r64, k}
+ ROW(2, 1, 1, 0, 84 , 112, 0 , 0 , 0 , 0 ), // #279 {k, k|m16|mem|r32|r16}
+ ROW(2, 1, 1, 0, 113, 84 , 0 , 0 , 0 , 0 ), // {m16|mem|r32|r16, k}
+ ROW(2, 1, 1, 0, 4 , 27 , 0 , 0 , 0 , 0 ), // #281 {r16, r16|m16|mem}
+ ROW(2, 1, 1, 0, 6 , 113, 0 , 0 , 0 , 0 ), // {r32, r32|m16|mem|r16}
+ ROW(2, 1, 0, 0, 4 , 29 , 0 , 0 , 0 , 0 ), // #283 {r16, m32|mem}
+ ROW(2, 1, 0, 0, 6 , 79 , 0 , 0 , 0 , 0 ), // {r32, m48|mem}
+ ROW(2, 1, 1, 0, 4 , 27 , 0 , 0 , 0 , 0 ), // #285 {r16, r16|m16|mem}
+ ROW(2, 1, 1, 0, 114, 113, 0 , 0 , 0 , 0 ), // {r32|r64, r32|m16|mem|r16}
+ ROW(2, 1, 1, 0, 59 , 28 , 0 , 0 , 0 , 0 ), // #287 {mm|xmm, r32|m32|mem}
+ ROW(2, 1, 1, 0, 28 , 59 , 0 , 0 , 0 , 0 ), // {r32|m32|mem, mm|xmm}
+ ROW(2, 1, 1, 0, 45 , 87 , 0 , 0 , 0 , 0 ), // #289 {xmm, xmm|m32|mem}
+ ROW(2, 1, 1, 0, 29 , 45 , 0 , 0 , 0 , 0 ), // {m32|mem, xmm}
+ ROW(2, 1, 1, 0, 4 , 9 , 0 , 0 , 0 , 0 ), // #291 {r16, r8lo|r8hi|m8}
+ ROW(2, 1, 1, 0, 114, 115, 0 , 0 , 0 , 0 ), // {r32|r64, r8lo|r8hi|m8|r16|m16}
+ ROW(4, 1, 1, 1, 6 , 6 , 28 , 35 , 0 , 0 ), // #293 {r32, r32, r32|m32|mem, <edx>}
+ ROW(4, 0, 1, 1, 8 , 8 , 15 , 37 , 0 , 0 ), // {r64, r64, r64|m64|mem, <rdx>}
+ ROW(0, 1, 1, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #295 {}
+ ROW(1, 1, 1, 0, 116, 0 , 0 , 0 , 0 , 0 ), // {r16|m16|r32|m32}
+ ROW(2, 1, 1, 0, 57 , 117, 0 , 0 , 0 , 0 ), // #297 {mm, mm|m64|mem}
+ ROW(2, 1, 1, 0, 45 , 46 , 0 , 0 , 0 , 0 ), // {xmm, xmm|m128|mem}
+ ROW(3, 1, 1, 0, 57 , 117, 10 , 0 , 0 , 0 ), // #299 {mm, mm|m64|mem, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 46 , 10 , 0 , 0 , 0 ), // {xmm, xmm|m128|mem, i8|u8}
+ ROW(3, 1, 1, 0, 6 , 59 , 10 , 0 , 0 , 0 ), // #301 {r32, mm|xmm, i8|u8}
+ ROW(3, 1, 1, 0, 21 , 45 , 10 , 0 , 0 , 0 ), // {m16|mem, xmm, i8|u8}
+ ROW(2, 1, 1, 0, 57 , 118, 0 , 0 , 0 , 0 ), // #303 {mm, i8|u8|mm|m64|mem}
+ ROW(2, 1, 1, 0, 45 , 54 , 0 , 0 , 0 , 0 ), // {xmm, i8|u8|xmm|m128|mem}
+ ROW(1, 1, 0, 0, 6 , 0 , 0 , 0 , 0 , 0 ), // #305 {r32}
+ ROW(1, 0, 1, 0, 8 , 0 , 0 , 0 , 0 , 0 ), // {r64}
+ ROW(0, 1, 1, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #307 {}
+ ROW(1, 1, 1, 0, 119, 0 , 0 , 0 , 0 , 0 ), // {u16}
+ ROW(3, 1, 1, 0, 6 , 28 , 10 , 0 , 0 , 0 ), // #309 {r32, r32|m32|mem, i8|u8}
+ ROW(3, 0, 1, 0, 8 , 15 , 10 , 0 , 0 , 0 ), // {r64, r64|m64|mem, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 46 , 45 , 0 , 0 ), // #311 {xmm, xmm, xmm|m128|mem, xmm}
+ ROW(4, 1, 1, 0, 48 , 48 , 49 , 48 , 0 , 0 ), // {ymm, ymm, ymm|m256|mem, ymm}
+ ROW(2, 1, 1, 0, 45 , 120, 0 , 0 , 0 , 0 ), // #313 {xmm, xmm|m128|ymm|m256}
+ ROW(2, 1, 1, 0, 48 , 52 , 0 , 0 , 0 , 0 ), // {ymm, zmm|m512|mem}
+ ROW(4, 1, 1, 0, 45 , 45 , 45 , 60 , 0 , 0 ), // #315 {xmm, xmm, xmm, xmm|m64|mem}
+ ROW(4, 1, 1, 0, 45 , 45 , 30 , 45 , 0 , 0 ), // {xmm, xmm, m64|mem, xmm}
+ ROW(4, 1, 1, 0, 45 , 45 , 45 , 87 , 0 , 0 ), // #317 {xmm, xmm, xmm, xmm|m32|mem}
+ ROW(4, 1, 1, 0, 45 , 45 , 29 , 45 , 0 , 0 ), // {xmm, xmm, m32|mem, xmm}
+ ROW(4, 1, 1, 0, 48 , 48 , 46 , 10 , 0 , 0 ), // #319 {ymm, ymm, xmm|m128|mem, i8|u8}
+ ROW(4, 1, 1, 0, 51 , 51 , 46 , 10 , 0 , 0 ), // {zmm, zmm, xmm|m128|mem, i8|u8}
+ ROW(1, 1, 0, 1, 36 , 0 , 0 , 0 , 0 , 0 ), // #321 {<eax>}
+ ROW(1, 0, 1, 1, 38 , 0 , 0 , 0 , 0 , 0 ), // #322 {<rax>}
+ ROW(2, 1, 1, 0, 28 , 45 , 0 , 0 , 0 , 0 ), // #323 {r32|m32|mem, xmm}
+ ROW(2, 1, 1, 0, 45 , 28 , 0 , 0 , 0 , 0 ), // {xmm, r32|m32|mem}
+ ROW(2, 1, 1, 0, 30 , 45 , 0 , 0 , 0 , 0 ), // #325 {m64|mem, xmm}
+ ROW(3, 1, 1, 0, 45 , 45 , 30 , 0 , 0 , 0 ), // {xmm, xmm, m64|mem}
+ ROW(2, 1, 0, 0, 28 , 6 , 0 , 0 , 0 , 0 ), // #327 {r32|m32|mem, r32}
+ ROW(2, 0, 1, 0, 15 , 8 , 0 , 0 , 0 , 0 ), // {r64|m64|mem, r64}
+ ROW(2, 1, 0, 0, 6 , 28 , 0 , 0 , 0 , 0 ), // #329 {r32, r32|m32|mem}
+ ROW(2, 0, 1, 0, 8 , 15 , 0 , 0 , 0 , 0 ), // {r64, r64|m64|mem}
+ ROW(3, 1, 1, 0, 45 , 45 , 54 , 0 , 0 , 0 ), // #331 {xmm, xmm, xmm|m128|mem|i8|u8}
+ ROW(3, 1, 1, 0, 45 , 47 , 121, 0 , 0 , 0 ), // {xmm, m128|mem, i8|u8|xmm}
+ ROW(2, 1, 1, 0, 74 , 45 , 0 , 0 , 0 , 0 ), // #333 {vm64x|vm64y, xmm}
+ ROW(2, 1, 1, 0, 66 , 48 , 0 , 0 , 0 , 0 ), // {vm64z, ymm}
+ ROW(3, 1, 1, 0, 45 , 45 , 46 , 0 , 0 , 0 ), // #335 {xmm, xmm, xmm|m128|mem}
+ ROW(3, 1, 1, 0, 45 , 47 , 45 , 0 , 0 , 0 ), // {xmm, m128|mem, xmm}
+ ROW(2, 1, 1, 0, 61 , 86 , 0 , 0 , 0 , 0 ), // #337 {vm32x, xmm|ymm}
+ ROW(2, 1, 1, 0, 62 , 51 , 0 , 0 , 0 , 0 ), // {vm32y, zmm}
+ ROW(1, 1, 0, 1, 33 , 0 , 0 , 0 , 0 , 0 ), // #339 {<ax>}
+ ROW(2, 1, 0, 1, 33 , 10 , 0 , 0 , 0 , 0 ), // #340 {<ax>, i8|u8}
+ ROW(2, 1, 0, 0, 27 , 4 , 0 , 0 , 0 , 0 ), // #341 {r16|m16|mem, r16}
+ ROW(3, 1, 1, 1, 45 , 46 , 122, 0 , 0 , 0 ), // #342 {xmm, xmm|m128|mem, <xmm0>}
+ ROW(2, 1, 1, 0, 89 , 123, 0 , 0 , 0 , 0 ), // #343 {bnd, mib}
+ ROW(2, 1, 1, 0, 89 , 91 , 0 , 0 , 0 , 0 ), // #344 {bnd, mem}
+ ROW(2, 1, 1, 0, 123, 89 , 0 , 0 , 0 , 0 ), // #345 {mib, bnd}
+ ROW(1, 1, 1, 0, 124, 0 , 0 , 0 , 0 , 0 ), // #346 {r16|r32|r64}
+ ROW(1, 1, 1, 1, 33 , 0 , 0 , 0 , 0 , 0 ), // #347 {<ax>}
+ ROW(2, 1, 1, 2, 35 , 36 , 0 , 0 , 0 , 0 ), // #348 {<edx>, <eax>}
+ ROW(1, 1, 1, 0, 91 , 0 , 0 , 0 , 0 , 0 ), // #349 {mem}
+ ROW(1, 1, 1, 1, 125, 0 , 0 , 0 , 0 , 0 ), // #350 {<ds:[memBase|zax]>}
+ ROW(2, 1, 1, 2, 126, 127, 0 , 0 , 0 , 0 ), // #351 {<ds:[memBase|zsi]>, <es:[memBase|zdi]>}
+ ROW(3, 1, 1, 0, 45 , 60 , 10 , 0 , 0 , 0 ), // #352 {xmm, xmm|m64|mem, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 87 , 10 , 0 , 0 , 0 ), // #353 {xmm, xmm|m32|mem, i8|u8}
+ ROW(5, 0, 1, 4, 47 , 37 , 38 , 128, 129, 0 ), // #354 {m128|mem, <rdx>, <rax>, <rcx>, <rbx>}
+ ROW(5, 1, 1, 4, 30 , 35 , 36 , 99 , 130, 0 ), // #355 {m64|mem, <edx>, <eax>, <ecx>, <ebx>}
+ ROW(4, 1, 1, 4, 36 , 130, 99 , 35 , 0 , 0 ), // #356 {<eax>, <ebx>, <ecx>, <edx>}
+ ROW(2, 0, 1, 2, 37 , 38 , 0 , 0 , 0 , 0 ), // #357 {<rdx>, <rax>}
+ ROW(2, 1, 1, 0, 57 , 46 , 0 , 0 , 0 , 0 ), // #358 {mm, xmm|m128|mem}
+ ROW(2, 1, 1, 0, 45 , 117, 0 , 0 , 0 , 0 ), // #359 {xmm, mm|m64|mem}
+ ROW(2, 1, 1, 0, 57 , 60 , 0 , 0 , 0 , 0 ), // #360 {mm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 114, 60 , 0 , 0 , 0 , 0 ), // #361 {r32|r64, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 45 , 131, 0 , 0 , 0 , 0 ), // #362 {xmm, r32|m32|mem|r64|m64}
+ ROW(2, 1, 1, 0, 114, 87 , 0 , 0 , 0 , 0 ), // #363 {r32|r64, xmm|m32|mem}
+ ROW(2, 1, 1, 2, 34 , 33 , 0 , 0 , 0 , 0 ), // #364 {<dx>, <ax>}
+ ROW(1, 1, 1, 1, 36 , 0 , 0 , 0 , 0 , 0 ), // #365 {<eax>}
+ ROW(2, 1, 1, 0, 12 , 10 , 0 , 0 , 0 , 0 ), // #366 {i16|u16, i8|u8}
+ ROW(3, 1, 1, 0, 28 , 45 , 10 , 0 , 0 , 0 ), // #367 {r32|m32|mem, xmm, i8|u8}
+ ROW(1, 1, 1, 0, 80 , 0 , 0 , 0 , 0 , 0 ), // #368 {m80|mem}
+ ROW(1, 1, 1, 0, 132, 0 , 0 , 0 , 0 , 0 ), // #369 {m16|m32}
+ ROW(1, 1, 1, 0, 133, 0 , 0 , 0 , 0 , 0 ), // #370 {m16|m32|m64}
+ ROW(1, 1, 1, 0, 134, 0 , 0 , 0 , 0 , 0 ), // #371 {m32|m64|m80|st}
+ ROW(1, 1, 1, 0, 21 , 0 , 0 , 0 , 0 , 0 ), // #372 {m16|mem}
+ ROW(1, 1, 1, 0, 135, 0 , 0 , 0 , 0 , 0 ), // #373 {ax|m16|mem}
+ ROW(1, 0, 1, 0, 91 , 0 , 0 , 0 , 0 , 0 ), // #374 {mem}
+ ROW(2, 1, 1, 0, 136, 137, 0 , 0 , 0 , 0 ), // #375 {al|ax|eax, i8|u8|dx}
+ ROW(2, 1, 1, 0, 138, 139, 0 , 0 , 0 , 0 ), // #376 {es:[memBase|zdi], dx}
+ ROW(1, 1, 1, 0, 10 , 0 , 0 , 0 , 0 , 0 ), // #377 {i8|u8}
+ ROW(0, 1, 0, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #378 {}
+ ROW(0, 0, 1, 0, 0 , 0 , 0 , 0 , 0 , 0 ), // #379 {}
+ ROW(3, 1, 1, 0, 84 , 84 , 84 , 0 , 0 , 0 ), // #380 {k, k, k}
+ ROW(2, 1, 1, 0, 84 , 84 , 0 , 0 , 0 , 0 ), // #381 {k, k}
+ ROW(3, 1, 1, 0, 84 , 84 , 10 , 0 , 0 , 0 ), // #382 {k, k, i8|u8}
+ ROW(1, 1, 1, 1, 140, 0 , 0 , 0 , 0 , 0 ), // #383 {<ah>}
+ ROW(1, 1, 1, 0, 29 , 0 , 0 , 0 , 0 , 0 ), // #384 {m32|mem}
+ ROW(2, 1, 1, 0, 124, 141, 0 , 0 , 0 , 0 ), // #385 {r16|r32|r64, mem|m8|m16|m32|m48|m64|m80|m128|m256|m512|m1024}
+ ROW(1, 1, 1, 0, 27 , 0 , 0 , 0 , 0 , 0 ), // #386 {r16|m16|mem}
+ ROW(1, 1, 1, 0, 114, 0 , 0 , 0 , 0 , 0 ), // #387 {r32|r64}
+ ROW(2, 1, 1, 2, 142, 126, 0 , 0 , 0 , 0 ), // #388 {<al|ax|eax|rax>, <ds:[memBase|zsi]>}
+ ROW(3, 1, 1, 0, 114, 28 , 14 , 0 , 0 , 0 ), // #389 {r32|r64, r32|m32|mem, i32|u32}
+ ROW(3, 1, 1, 1, 45 , 45 , 143, 0 , 0 , 0 ), // #390 {xmm, xmm, <ds:[memBase|zdi]>}
+ ROW(3, 1, 1, 1, 57 , 57 , 143, 0 , 0 , 0 ), // #391 {mm, mm, <ds:[memBase|zdi]>}
+ ROW(3, 1, 1, 3, 125, 99 , 35 , 0 , 0 , 0 ), // #392 {<ds:[memBase|zax]>, <ecx>, <edx>}
+ ROW(2, 1, 1, 0, 97 , 53 , 0 , 0 , 0 , 0 ), // #393 {es:[memBase], m512|mem}
+ ROW(2, 1, 1, 0, 57 , 45 , 0 , 0 , 0 , 0 ), // #394 {mm, xmm}
+ ROW(2, 1, 1, 0, 6 , 45 , 0 , 0 , 0 , 0 ), // #395 {r32, xmm}
+ ROW(2, 1, 1, 0, 30 , 57 , 0 , 0 , 0 , 0 ), // #396 {m64|mem, mm}
+ ROW(2, 1, 1, 0, 45 , 57 , 0 , 0 , 0 , 0 ), // #397 {xmm, mm}
+ ROW(2, 1, 1, 2, 127, 126, 0 , 0 , 0 , 0 ), // #398 {<es:[memBase|zdi]>, <ds:[memBase|zsi]>}
+ ROW(2, 0, 1, 0, 8 , 28 , 0 , 0 , 0 , 0 ), // #399 {r64, r32|m32|mem}
+ ROW(2, 1, 1, 2, 36 , 99 , 0 , 0 , 0 , 0 ), // #400 {<eax>, <ecx>}
+ ROW(3, 1, 1, 3, 36 , 99 , 130, 0 , 0 , 0 ), // #401 {<eax>, <ecx>, <ebx>}
+ ROW(2, 1, 1, 0, 144, 136, 0 , 0 , 0 , 0 ), // #402 {u8|dx, al|ax|eax}
+ ROW(2, 1, 1, 0, 139, 145, 0 , 0 , 0 , 0 ), // #403 {dx, ds:[memBase|zsi]}
+ ROW(6, 1, 1, 3, 45 , 46 , 10 , 99 , 36 , 35 ), // #404 {xmm, xmm|m128|mem, i8|u8, <ecx>, <eax>, <edx>}
+ ROW(6, 1, 1, 3, 45 , 46 , 10 , 122, 36 , 35 ), // #405 {xmm, xmm|m128|mem, i8|u8, <xmm0>, <eax>, <edx>}
+ ROW(4, 1, 1, 1, 45 , 46 , 10 , 99 , 0 , 0 ), // #406 {xmm, xmm|m128|mem, i8|u8, <ecx>}
+ ROW(4, 1, 1, 1, 45 , 46 , 10 , 122, 0 , 0 ), // #407 {xmm, xmm|m128|mem, i8|u8, <xmm0>}
+ ROW(3, 1, 1, 0, 109, 45 , 10 , 0 , 0 , 0 ), // #408 {r32|m8|mem|r8lo|r8hi|r16, xmm, i8|u8}
+ ROW(3, 0, 1, 0, 15 , 45 , 10 , 0 , 0 , 0 ), // #409 {r64|m64|mem, xmm, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 109, 10 , 0 , 0 , 0 ), // #410 {xmm, r32|m8|mem|r8lo|r8hi|r16, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 28 , 10 , 0 , 0 , 0 ), // #411 {xmm, r32|m32|mem, i8|u8}
+ ROW(3, 0, 1, 0, 45 , 15 , 10 , 0 , 0 , 0 ), // #412 {xmm, r64|m64|mem, i8|u8}
+ ROW(3, 1, 1, 0, 59 , 113, 10 , 0 , 0 , 0 ), // #413 {mm|xmm, r32|m16|mem|r16, i8|u8}
+ ROW(2, 1, 1, 0, 6 , 59 , 0 , 0 , 0 , 0 ), // #414 {r32, mm|xmm}
+ ROW(2, 1, 1, 0, 45 , 10 , 0 , 0 , 0 , 0 ), // #415 {xmm, i8|u8}
+ ROW(2, 1, 1, 0, 31 , 81 , 0 , 0 , 0 , 0 ), // #416 {r8lo|r8hi|m8|r16|m16|r32|m32|r64|m64|mem, cl|i8|u8}
+ ROW(1, 0, 1, 0, 114, 0 , 0 , 0 , 0 , 0 ), // #417 {r32|r64}
+ ROW(3, 1, 1, 3, 35 , 36 , 99 , 0 , 0 , 0 ), // #418 {<edx>, <eax>, <ecx>}
+ ROW(2, 1, 1, 2, 142, 127, 0 , 0 , 0 , 0 ), // #419 {<al|ax|eax|rax>, <es:[memBase|zdi]>}
+ ROW(1, 1, 1, 0, 1 , 0 , 0 , 0 , 0 , 0 ), // #420 {r8lo|r8hi|m8|mem}
+ ROW(1, 1, 1, 0, 146, 0 , 0 , 0 , 0 , 0 ), // #421 {r16|m16|mem|r32|r64}
+ ROW(2, 1, 1, 2, 127, 142, 0 , 0 , 0 , 0 ), // #422 {<es:[memBase|zdi]>, <al|ax|eax|rax>}
+ ROW(6, 1, 1, 0, 51 , 51 , 51 , 51 , 51 , 47 ), // #423 {zmm, zmm, zmm, zmm, zmm, m128|mem}
+ ROW(6, 1, 1, 0, 45 , 45 , 45 , 45 , 45 , 47 ), // #424 {xmm, xmm, xmm, xmm, xmm, m128|mem}
+ ROW(3, 1, 1, 0, 45 , 45 , 60 , 0 , 0 , 0 ), // #425 {xmm, xmm, xmm|m64|mem}
+ ROW(3, 1, 1, 0, 45 , 45 , 87 , 0 , 0 , 0 ), // #426 {xmm, xmm, xmm|m32|mem}
+ ROW(2, 1, 1, 0, 48 , 47 , 0 , 0 , 0 , 0 ), // #427 {ymm, m128|mem}
+ ROW(2, 1, 1, 0, 147, 60 , 0 , 0 , 0 , 0 ), // #428 {ymm|zmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 147, 47 , 0 , 0 , 0 , 0 ), // #429 {ymm|zmm, m128|mem}
+ ROW(2, 1, 1, 0, 51 , 50 , 0 , 0 , 0 , 0 ), // #430 {zmm, m256|mem}
+ ROW(2, 1, 1, 0, 148, 60 , 0 , 0 , 0 , 0 ), // #431 {xmm|ymm|zmm, xmm|m64|mem}
+ ROW(2, 1, 1, 0, 148, 87 , 0 , 0 , 0 , 0 ), // #432 {xmm|ymm|zmm, m32|mem|xmm}
+ ROW(4, 1, 1, 0, 82 , 45 , 60 , 10 , 0 , 0 ), // #433 {xmm|k, xmm, xmm|m64|mem, i8|u8}
+ ROW(4, 1, 1, 0, 82 , 45 , 87 , 10 , 0 , 0 ), // #434 {xmm|k, xmm, xmm|m32|mem, i8|u8}
+ ROW(3, 1, 1, 0, 45 , 45 , 131, 0 , 0 , 0 ), // #435 {xmm, xmm, r32|m32|mem|r64|m64}
+ ROW(3, 1, 1, 0, 46 , 147, 10 , 0 , 0 , 0 ), // #436 {xmm|m128|mem, ymm|zmm, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 60 , 10 , 0 , 0 ), // #437 {xmm, xmm, xmm|m64|mem, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 87 , 10 , 0 , 0 ), // #438 {xmm, xmm, xmm|m32|mem, i8|u8}
+ ROW(3, 1, 1, 0, 84 , 149, 10 , 0 , 0 , 0 ), // #439 {k, xmm|m128|ymm|m256|zmm|m512, i8|u8}
+ ROW(3, 1, 1, 0, 84 , 60 , 10 , 0 , 0 , 0 ), // #440 {k, xmm|m64|mem, i8|u8}
+ ROW(3, 1, 1, 0, 84 , 87 , 10 , 0 , 0 , 0 ), // #441 {k, xmm|m32|mem, i8|u8}
+ ROW(1, 1, 1, 0, 62 , 0 , 0 , 0 , 0 , 0 ), // #442 {vm32y}
+ ROW(1, 1, 1, 0, 63 , 0 , 0 , 0 , 0 , 0 ), // #443 {vm32z}
+ ROW(1, 1, 1, 0, 66 , 0 , 0 , 0 , 0 , 0 ), // #444 {vm64z}
+ ROW(4, 1, 1, 0, 51 , 51 , 49 , 10 , 0 , 0 ), // #445 {zmm, zmm, ymm|m256|mem, i8|u8}
+ ROW(1, 1, 1, 0, 30 , 0 , 0 , 0 , 0 , 0 ), // #446 {m64|mem}
+ ROW(2, 1, 1, 0, 6 , 86 , 0 , 0 , 0 , 0 ), // #447 {r32, xmm|ymm}
+ ROW(2, 1, 1, 0, 148, 150, 0 , 0 , 0 , 0 ), // #448 {xmm|ymm|zmm, xmm|m8|mem|r32|r8lo|r8hi|r16}
+ ROW(2, 1, 1, 0, 148, 151, 0 , 0 , 0 , 0 ), // #449 {xmm|ymm|zmm, xmm|m32|mem|r32}
+ ROW(2, 1, 1, 0, 148, 84 , 0 , 0 , 0 , 0 ), // #450 {xmm|ymm|zmm, k}
+ ROW(2, 1, 1, 0, 148, 152, 0 , 0 , 0 , 0 ), // #451 {xmm|ymm|zmm, xmm|m16|mem|r32|r16}
+ ROW(3, 1, 1, 0, 113, 45 , 10 , 0 , 0 , 0 ), // #452 {r32|m16|mem|r16, xmm, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 109, 10 , 0 , 0 ), // #453 {xmm, xmm, r32|m8|mem|r8lo|r8hi|r16, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 28 , 10 , 0 , 0 ), // #454 {xmm, xmm, r32|m32|mem, i8|u8}
+ ROW(4, 0, 1, 0, 45 , 45 , 15 , 10 , 0 , 0 ), // #455 {xmm, xmm, r64|m64|mem, i8|u8}
+ ROW(4, 1, 1, 0, 45 , 45 , 113, 10 , 0 , 0 ), // #456 {xmm, xmm, r32|m16|mem|r16, i8|u8}
+ ROW(2, 1, 1, 0, 84 , 148, 0 , 0 , 0 , 0 ), // #457 {k, xmm|ymm|zmm}
+ ROW(1, 1, 1, 0, 102, 0 , 0 , 0 , 0 , 0 ), // #458 {rel16|rel32}
+ ROW(3, 1, 1, 2, 91 , 35 , 36 , 0 , 0 , 0 ), // #459 {mem, <edx>, <eax>}
+ ROW(3, 0, 1, 2, 91 , 35 , 36 , 0 , 0 , 0 ) // #460 {mem, <edx>, <eax>}
+};
+#undef ROW
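+// A reading aid for the signature table above, inferred from the table data
+// itself rather than from the generator: each ROW packs the explicit operand
+// count, whether the form is encodable in 32-bit (X86) and 64-bit (X64) mode,
+// the number of implicit operands (those rendered as <ax>, <edx>, etc. in the
+// comments), and up to six indexes into _opSignatureTable below. For example
+// row #97:
+//
+//   ROW(2, 1, 1, 0, 45, 60, 0, 0, 0, 0)  // {xmm, xmm|m64|mem}
+//
+// reads as: two explicit operands, valid in both modes, no implicit operands;
+// operand 0 must match _opSignatureTable[45] (an XMM register) and operand 1
+// must match _opSignatureTable[60] (an XMM register or a 64-bit memory
+// location), which is exactly what the generated comment spells out.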
+
+#define ROW(flags, mFlags, extFlags, regId) { uint32_t(flags), uint16_t(mFlags), uint8_t(extFlags), uint8_t(regId) }
+#define F(VAL) InstDB::kOp##VAL
+#define M(VAL) InstDB::kMemOp##VAL
+const InstDB::OpSignature InstDB::_opSignatureTable[] = {
+ ROW(0, 0, 0, 0xFF),
+ ROW(F(GpbLo) | F(GpbHi) | F(Mem), M(M8) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi), 0, 0, 0x00),
+ ROW(F(Gpw) | F(SReg) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Gpw), 0, 0, 0x00),
+ ROW(F(Gpd) | F(SReg) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Gpd), 0, 0, 0x00),
+ ROW(F(Gpq) | F(SReg) | F(CReg) | F(DReg) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpq), 0, 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Mem), M(M8), 0, 0x00),
+ ROW(F(I8) | F(U8), 0, 0, 0x00),
+ ROW(F(Gpw) | F(Mem), M(M16), 0, 0x00),
+ ROW(F(I16) | F(U16), 0, 0, 0x00),
+ ROW(F(Gpd) | F(Mem), M(M32), 0, 0x00),
+ ROW(F(I32) | F(U32), 0, 0, 0x00),
+ ROW(F(Gpq) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(I32), 0, 0, 0x00),
+ ROW(F(SReg) | F(CReg) | F(DReg) | F(Mem) | F(I64) | F(U64), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M8) | M(Any), 0, 0x00),
+ ROW(F(SReg) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(SReg) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(SReg), 0, 0, 0x00),
+ ROW(F(CReg) | F(DReg), 0, 0, 0x00),
+ ROW(F(Gpq) | F(I32), 0, 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Gpq) | F(Mem), M(M16) | M(M32) | M(M64) | M(Any), 0, 0x00),
+ ROW(F(I8), 0, 0, 0x00),
+ ROW(F(Gpw) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Gpd) | F(Gpq) | F(Mem), M(M8) | M(M16) | M(M32) | M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpq) | F(Mem) | F(I32) | F(U32), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpw) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Gpw) | F(Implicit), 0, 0, 0x04),
+ ROW(F(Gpd) | F(Implicit), 0, 0, 0x04),
+ ROW(F(Gpd) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Gpq) | F(Implicit), 0, 0, 0x04),
+ ROW(F(Gpq) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Gpw) | F(Mem) | F(I8) | F(I16), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Mem) | F(I8) | F(I32), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Gpq) | F(Mem) | F(I8) | F(I32), M(M64) | M(Any), 0, 0x00),
+ ROW(F(I8) | F(I16) | F(U16), 0, 0, 0x00),
+ ROW(F(I8) | F(I32) | F(U32), 0, 0, 0x00),
+ ROW(F(I8) | F(I32), 0, 0, 0x00),
+ ROW(F(Xmm), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Mem), M(M128) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M128) | M(Any), 0, 0x00),
+ ROW(F(Ymm), 0, 0, 0x00),
+ ROW(F(Ymm) | F(Mem), M(M256) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M256) | M(Any), 0, 0x00),
+ ROW(F(Zmm), 0, 0, 0x00),
+ ROW(F(Zmm) | F(Mem), M(M512) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M512) | M(Any), 0, 0x00),
+ ROW(F(Xmm) | F(Mem) | F(I8) | F(U8), M(M128) | M(Any), 0, 0x00),
+ ROW(F(Ymm) | F(Mem) | F(I8) | F(U8), M(M256) | M(Any), 0, 0x00),
+ ROW(F(Zmm) | F(Mem) | F(I8) | F(U8), M(M512) | M(Any), 0, 0x00),
+ ROW(F(Mm), 0, 0, 0x00),
+ ROW(F(Gpq) | F(Mm) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Xmm) | F(Mm), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Vm), M(Vm32x), 0, 0x00),
+ ROW(F(Vm), M(Vm32y), 0, 0x00),
+ ROW(F(Vm), M(Vm32z), 0, 0x00),
+ ROW(F(Vm), M(Vm64x), 0, 0x00),
+ ROW(F(Vm), M(Vm64y), 0, 0x00),
+ ROW(F(Vm), M(Vm64z), 0, 0x00),
+ ROW(F(GpbLo) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Gpw) | F(Gpq) | F(Mem), M(M16) | M(M64) | M(Any), 0, 0x00),
+ ROW(F(SReg), 0, 0, 0x1A),
+ ROW(F(SReg), 0, 0, 0x60),
+ ROW(F(Gpw) | F(Gpq) | F(Mem) | F(I8) | F(I16) | F(I32), M(M16) | M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Mem) | F(I32) | F(U32), M(M32), 0, 0x00),
+ ROW(F(SReg), 0, 0, 0x1E),
+ ROW(F(Vm), M(Vm64x) | M(Vm64y), 0, 0x00),
+ ROW(F(I4) | F(U4), 0, 0, 0x00),
+ ROW(F(Mem), M(M32) | M(M64), 0, 0x00),
+ ROW(F(St), 0, 0, 0x01),
+ ROW(F(St), 0, 0, 0x00),
+ ROW(F(Mem), M(M48) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M80) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(I8) | F(U8), 0, 0, 0x02),
+ ROW(F(Xmm) | F(KReg), 0, 0, 0x00),
+ ROW(F(Ymm) | F(KReg), 0, 0, 0x00),
+ ROW(F(KReg), 0, 0, 0x00),
+ ROW(F(Gpq) | F(Xmm) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Xmm) | F(Ymm), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Xmm) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Bnd), 0, 0, 0x00),
+ ROW(F(Bnd) | F(Mem), M(Any), 0, 0x00),
+ ROW(F(Mem), M(Any), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Mem) | F(I32) | F(I64) | F(Rel32), M(M16) | M(M32), 0, 0x00),
+ ROW(F(Gpq) | F(Mem) | F(I32) | F(I64) | F(Rel32), M(M64) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Gpd) | F(Mem), M(M8) | M(M16) | M(M32), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpq) | F(Mem), M(M8) | M(M64), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd), 0, 0, 0x00),
+ ROW(F(Mem), M(BaseOnly) | M(Es), 0, 0x00),
+ ROW(F(St) | F(Mem), M(M32) | M(M64), 0, 0x00),
+ ROW(F(Gpd) | F(Implicit), 0, 0, 0x02),
+ ROW(F(Gpd) | F(Gpq) | F(Implicit), 0, 0, 0x01),
+ ROW(F(I32) | F(I64) | F(Rel8) | F(Rel32), 0, 0, 0x00),
+ ROW(F(I32) | F(I64) | F(Rel32), 0, 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Implicit), 0, 0, 0x02),
+ ROW(F(I32) | F(I64) | F(Rel8), 0, 0, 0x00),
+ ROW(F(Gpd) | F(Gpq) | F(Implicit), 0, 0, 0x02),
+ ROW(F(Gpq) | F(Mem) | F(I32) | F(I64) | F(Rel8) | F(Rel32), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Mem) | F(I32) | F(I64) | F(Rel32), M(M32) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Gpd) | F(KReg) | F(Mem), M(M8) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Gpd) | F(Mem), M(M8) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(KReg) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Gpq) | F(KReg) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(KReg) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Gpq), 0, 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Mem), M(M8) | M(M16), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Mem), M(M16) | M(M32), 0, 0x00),
+ ROW(F(Mm) | F(Mem), M(M64) | M(Any), 0, 0x00),
+ ROW(F(Mm) | F(Mem) | F(I8) | F(U8), M(M64) | M(Any), 0, 0x00),
+ ROW(F(U16), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Ymm) | F(Mem), M(M128) | M(M256), 0, 0x00),
+ ROW(F(Xmm) | F(I8) | F(U8), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Mem), M(Mib), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Gpq), 0, 0, 0x00),
+ ROW(F(Mem) | F(Implicit), M(BaseOnly) | M(Ds), 0, 0x01),
+ ROW(F(Mem) | F(Implicit), M(BaseOnly) | M(Ds), 0, 0x40),
+ ROW(F(Mem) | F(Implicit), M(BaseOnly) | M(Es), 0, 0x80),
+ ROW(F(Gpq) | F(Implicit), 0, 0, 0x02),
+ ROW(F(Gpq) | F(Implicit), 0, 0, 0x08),
+ ROW(F(Gpd) | F(Implicit), 0, 0, 0x08),
+ ROW(F(Gpd) | F(Gpq) | F(Mem), M(M32) | M(M64) | M(Any), 0, 0x00),
+ ROW(F(Mem), M(M16) | M(M32), 0, 0x00),
+ ROW(F(Mem), M(M16) | M(M32) | M(M64), 0, 0x00),
+ ROW(F(St) | F(Mem), M(M32) | M(M64) | M(M80), 0, 0x00),
+ ROW(F(Gpw) | F(Mem), M(M16) | M(Any), 0, 0x01),
+ ROW(F(GpbLo) | F(Gpw) | F(Gpd), 0, 0, 0x01),
+ ROW(F(Gpw) | F(I8) | F(U8), 0, 0, 0x04),
+ ROW(F(Mem), M(BaseOnly) | M(Es), 0, 0x80),
+ ROW(F(Gpw), 0, 0, 0x04),
+ ROW(F(GpbHi) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Mem), M(M8) | M(M16) | M(M32) | M(M48) | M(M64) | M(M80) | M(M128) | M(M256) | M(M512) | M(M1024) | M(Any), 0, 0x00),
+ ROW(F(GpbLo) | F(Gpw) | F(Gpd) | F(Gpq) | F(Implicit), 0, 0, 0x01),
+ ROW(F(Mem) | F(Implicit), M(BaseOnly) | M(Ds), 0, 0x80),
+ ROW(F(Gpw) | F(U8), 0, 0, 0x04),
+ ROW(F(Mem), M(BaseOnly) | M(Ds), 0, 0x40),
+ ROW(F(Gpw) | F(Gpd) | F(Gpq) | F(Mem), M(M16) | M(Any), 0, 0x00),
+ ROW(F(Ymm) | F(Zmm), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Ymm) | F(Zmm), 0, 0, 0x00),
+ ROW(F(Xmm) | F(Ymm) | F(Zmm) | F(Mem), M(M128) | M(M256) | M(M512), 0, 0x00),
+ ROW(F(GpbLo) | F(GpbHi) | F(Gpw) | F(Gpd) | F(Xmm) | F(Mem), M(M8) | M(Any), 0, 0x00),
+ ROW(F(Gpd) | F(Xmm) | F(Mem), M(M32) | M(Any), 0, 0x00),
+ ROW(F(Gpw) | F(Gpd) | F(Xmm) | F(Mem), M(M16) | M(Any), 0, 0x00)
+};
+#undef M
+#undef F
+#undef ROW
+// ----------------------------------------------------------------------------
+// ${InstSignatureTable:End}
+#endif // !ASMJIT_NO_VALIDATION
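+
+// A minimal sketch of the public path that ends up consulting the signature
+// tables above. The names used here (InstAPI::validate and its parameter
+// order) are assumptions based on asmjit's validation API of this vintage,
+// not something asserted by this file: validation matches a queried
+// instruction and its operands against _instSignatureTable and
+// _opSignatureTable and reports an error when no row matches.
+//
+//   #include <asmjit/asmjit.h>
+//   using namespace asmjit;
+//
+//   bool isEncodable() {
+//     // vaddpd xmm0, xmm1, xmm2 - three XMM operands, matching a row of the
+//     // form {xmm, xmm, xmm|m128|mem}.
+//     Operand ops[] = { x86::xmm0, x86::xmm1, x86::xmm2 };
+//     BaseInst inst(x86::Inst::kIdVaddpd);
+//     return InstAPI::validate(ArchInfo::kIdX64, inst, ops, 3) == kErrorOk;
+//   }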
+
+// ============================================================================
+// [asmjit::x86::InstInternal - QueryRWInfo]
+// ============================================================================
+
+// ${InstRWInfoTable:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+const uint8_t InstDB::rwInfoIndex[Inst::_kIdCount * 2] = {
+ 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 2, 0, 3, 0, 2, 0, 4, 0, 4, 0, 5, 0, 6, 0, 4, 0,
+ 4, 0, 3, 0, 4, 0, 4, 0, 4, 0, 4, 0, 7, 0, 0, 7, 2, 0, 0, 8, 4, 0, 4, 0, 4, 0,
+ 4, 0, 9, 0, 0, 10, 11, 0, 11, 0, 11, 0, 11, 0, 11, 0, 0, 4, 0, 4, 0, 12, 0, 12,
+ 11, 0, 11, 0, 11, 0, 11, 0, 11, 0, 13, 0, 13, 0, 13, 0, 14, 0, 14, 0, 15, 0,
+ 16, 0, 17, 0, 11, 0, 11, 0, 0, 18, 19, 0, 20, 0, 20, 0, 20, 0, 0, 10, 0, 21,
+ 0, 1, 22, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 24, 0, 24, 0, 24, 0, 0, 0, 0, 0, 0, 0,
+ 24, 0, 25, 0, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0,
+ 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0,
+ 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 26, 0, 0, 4, 0, 4, 27, 0, 0, 5, 0,
+ 6, 0, 28, 0, 29, 0, 30, 31, 0, 32, 0, 0, 33, 34, 0, 35, 0, 36, 0, 7, 0, 37, 0,
+ 37, 0, 37, 0, 36, 0, 38, 0, 7, 0, 36, 0, 39, 0, 40, 0, 41, 0, 42, 0, 43, 0, 44,
+ 0, 45, 0, 37, 0, 37, 0, 7, 0, 39, 0, 40, 0, 45, 0, 46, 0, 0, 47, 0, 1, 0, 1,
+ 0, 48, 49, 50, 4, 0, 4, 0, 5, 0, 6, 0, 0, 4, 0, 4, 0, 0, 51, 0, 51, 0, 0, 0,
+ 0, 52, 53, 54, 0, 0, 0, 0, 55, 56, 0, 57, 0, 58, 0, 59, 0, 0, 0, 0, 0, 57, 0,
+ 57, 0, 57, 0, 57, 0, 57, 0, 57, 0, 57, 0, 57, 0, 60, 0, 61, 0, 61, 0, 60, 0,
+ 0, 0, 0, 0, 0, 55, 56, 0, 57, 55, 56, 0, 57, 0, 0, 0, 57, 0, 56, 0, 56, 0, 56,
+ 0, 56, 0, 56, 0, 56, 0, 56, 0, 0, 0, 0, 0, 62, 0, 62, 0, 62, 0, 56, 0, 56, 0,
+ 60, 0, 0, 0, 63, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 55, 56, 0, 57, 0,
+ 0, 0, 0, 0, 0, 0, 64, 0, 65, 0, 64, 0, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 24,
+ 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 67, 0, 65, 0, 64, 0, 67, 0, 66, 55, 56, 0,
+ 57, 55, 56, 0, 57, 0, 0, 0, 61, 0, 61, 0, 61, 0, 61, 0, 0, 0, 0, 0, 0, 0, 57,
+ 0, 24, 0, 24, 0, 64, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 4, 4, 0, 4, 0,
+ 4, 0, 0, 0, 4, 0, 4, 0, 49, 50, 68, 69, 70, 0, 0, 48, 71, 0, 0, 72, 53, 53, 0,
+ 0, 0, 0, 0, 0, 0, 0, 73, 0, 0, 24, 74, 0, 73, 0, 73, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 76, 0, 77, 0, 78, 0, 79, 0, 76, 0,
+ 77, 0, 76, 0, 77, 0, 78, 0, 79, 0, 78, 0, 79, 80, 0, 81, 0, 82, 0, 83, 0, 84,
+ 0, 85, 0, 86, 0, 87, 0, 0, 76, 0, 77, 0, 78, 88, 0, 89, 0, 90, 0, 91, 0, 0, 79,
+ 0, 84, 0, 85, 0, 86, 0, 87, 0, 84, 0, 85, 0, 86, 0, 87, 88, 0, 89, 0, 90, 0,
+ 91, 0, 0, 92, 0, 93, 0, 94, 0, 76, 0, 77, 0, 78, 0, 79, 0, 76, 0, 77, 0, 78,
+ 0, 79, 0, 95, 96, 0, 97, 0, 0, 98, 99, 0, 100, 0, 0, 0, 99, 0, 0, 0, 99, 0, 0,
+ 24, 99, 0, 0, 24, 0, 101, 0, 102, 0, 101, 103, 0, 104, 0, 104, 0, 104, 0, 96,
+ 0, 99, 0, 0, 101, 0, 105, 0, 105, 11, 0, 0, 106, 0, 107, 4, 0, 4, 0, 5, 0, 6,
+ 0, 0, 0, 4, 0, 4, 0, 5, 0, 6, 0, 0, 108, 0, 108, 109, 0, 110, 0, 110, 0, 111,
+ 0, 81, 0, 36, 0, 112, 0, 111, 0, 86, 0, 110, 0, 110, 0, 113, 0, 114, 0, 114,
+ 0, 115, 0, 116, 0, 116, 0, 117, 0, 117, 0, 97, 0, 97, 0, 111, 0, 97, 0, 97, 0,
+ 116, 0, 116, 0, 118, 0, 82, 0, 86, 0, 119, 0, 82, 0, 7, 0, 7, 0, 81, 0, 120,
+ 0, 121, 0, 110, 0, 110, 0, 120, 0, 0, 4, 49, 122, 4, 0, 4, 0, 5, 0, 6, 0, 0,
+ 123, 124, 0, 0, 125, 0, 48, 0, 126, 0, 48, 2, 0, 4, 0, 4, 0, 127, 0, 128, 0, 11,
+ 0, 11, 0, 11, 0, 3, 0, 3, 0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0,
+ 3, 0, 3, 0, 0, 3, 3, 0, 3, 0, 0, 0, 3, 0, 129, 0, 3, 0, 0, 12, 0, 4, 0, 4, 3,
+ 0, 3, 0, 4, 0, 3, 0, 0, 130, 0, 131, 3, 0, 3, 0, 4, 0, 3, 0, 0, 132, 0, 133,
+ 0, 0, 0, 8, 0, 8, 0, 134, 0, 52, 0, 135, 0, 136, 39, 0, 39, 0, 129, 0, 129, 0,
+ 129, 0, 129, 0, 129, 0, 129, 0, 129, 0, 129, 0, 129, 0, 129, 0, 39, 0, 129,
+ 0, 129, 0, 129, 0, 39, 0, 39, 0, 129, 0, 129, 0, 129, 0, 3, 0, 3, 0, 3, 0, 137,
+ 0, 3, 0, 3, 0, 3, 0, 39, 0, 39, 0, 0, 138, 0, 72, 0, 139, 0, 140, 3, 0, 3, 0,
+ 4, 0, 4, 0, 3, 0, 3, 0, 4, 0, 4, 0, 4, 0, 4, 0, 3, 0, 3, 0, 4, 0, 4, 0, 141,
+ 0, 142, 0, 143, 0, 36, 0, 36, 0, 36, 0, 142, 0, 142, 0, 143, 0, 36, 0, 36, 0,
+ 36, 0, 142, 0, 4, 0, 3, 0, 129, 0, 3, 0, 3, 0, 4, 0, 3, 0, 3, 0, 0, 144, 0, 0,
+ 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24,
+ 0, 24, 3, 0, 3, 0, 0, 7, 0, 7, 0, 7, 0, 39, 3, 0, 3, 0, 3, 0, 3, 0, 54, 0,
+ 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 54, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 0,
+ 3, 0, 3, 0, 3, 0, 39, 0, 145, 0, 3, 0, 3, 0, 4, 0, 3, 0, 3, 0, 3, 0, 4, 0, 3,
+ 0, 0, 146, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 147, 0, 7, 0, 148, 0, 147, 0,
+ 0, 149, 0, 149, 0, 150, 0, 149, 0, 150, 0, 149, 0, 149, 151, 0, 0, 152, 0, 0,
+ 147, 0, 147, 0, 0, 11, 0, 7, 0, 7, 0, 38, 0, 148, 0, 0, 7, 0, 148, 0, 0, 153,
+ 147, 0, 147, 0, 0, 10, 2, 0, 154, 0, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0,
+ 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155,
+ 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 155,
+ 0, 155, 0, 155, 0, 155, 0, 155, 0, 155, 0, 0, 0, 64, 4, 0, 4, 0, 4, 0, 0, 4,
+ 4, 0, 4, 0, 0, 12, 147, 0, 0, 156, 0, 10, 147, 0, 0, 156, 0, 10, 0, 4, 0, 4,
+ 0, 64, 0, 47, 0, 157, 0, 149, 0, 157, 7, 0, 7, 0, 38, 0, 148, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 158, 159, 0, 0, 157, 2, 0, 4, 0, 4, 0, 5, 0, 6, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 19, 0, 11, 0, 11, 0, 31, 0, 32, 0,
+ 0, 0, 4, 0, 4, 0, 4, 0, 4, 0, 0, 160, 0, 161, 0, 160, 0, 161, 0, 8, 0, 8, 0, 162,
+ 0, 163, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 7, 0, 0, 7, 0, 8, 0, 8, 0, 8,
+ 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 164, 0, 164,
+ 165, 0, 40, 0, 166, 0, 167, 0, 166, 0, 167, 0, 165, 0, 40, 0, 166, 0, 167,
+ 0, 166, 0, 167, 0, 168, 0, 169, 0, 0, 8, 0, 8, 0, 170, 0, 171, 31, 0, 32, 0,
+ 172, 0, 172, 0, 173, 0, 11, 0, 0, 8, 120, 0, 174, 0, 174, 0, 11, 0, 174, 0, 11,
+ 0, 173, 0, 11, 0, 173, 0, 0, 175, 173, 0, 11, 0, 173, 0, 11, 0, 174, 0, 40,
+ 0, 0, 176, 40, 0, 0, 177, 0, 178, 0, 179, 45, 0, 45, 0, 174, 0, 11, 0, 174, 0,
+ 11, 0, 11, 0, 173, 0, 11, 0, 173, 0, 40, 0, 40, 0, 45, 0, 45, 0, 173, 0, 11,
+ 0, 11, 0, 174, 0, 0, 177, 0, 178, 0, 8, 0, 8, 0, 8, 0, 162, 0, 163, 0, 8, 0, 180,
+ 0, 8, 0, 101, 0, 101, 181, 0, 181, 0, 11, 0, 11, 0, 0, 182, 0, 183, 0, 184,
+ 0, 183, 0, 184, 0, 182, 0, 183, 0, 184, 0, 183, 0, 184, 0, 52, 0, 185, 0, 185,
+ 0, 186, 0, 187, 0, 185, 0, 185, 0, 188, 0, 189, 0, 185, 0, 185, 0, 188, 0,
+ 189, 0, 185, 0, 185, 0, 188, 0, 189, 0, 190, 0, 190, 0, 191, 0, 192, 0, 185, 0,
+ 185, 0, 185, 0, 185, 0, 185, 0, 185, 0, 190, 0, 190, 0, 185, 0, 185, 0, 188,
+ 0, 189, 0, 185, 0, 185, 0, 188, 0, 189, 0, 185, 0, 185, 0, 188, 0, 189, 0, 185,
+ 0, 185, 0, 185, 0, 185, 0, 185, 0, 185, 0, 190, 0, 190, 0, 190, 0, 190, 0,
+ 191, 0, 192, 0, 185, 0, 185, 0, 188, 0, 189, 0, 185, 0, 185, 0, 188, 0, 189, 0,
+ 185, 0, 185, 0, 188, 0, 189, 0, 190, 0, 190, 0, 191, 0, 192, 0, 185, 0, 185,
+ 0, 188, 0, 189, 0, 185, 0, 185, 0, 188, 0, 189, 0, 185, 0, 185, 0, 193, 0, 194,
+ 0, 190, 0, 190, 0, 191, 0, 192, 0, 195, 0, 195, 0, 39, 0, 121, 11, 0, 11, 0,
+ 39, 0, 196, 0, 99, 197, 99, 198, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 0,
+ 24, 0, 24, 99, 198, 99, 199, 11, 0, 11, 0, 0, 200, 0, 201, 0, 11, 0, 11, 0,
+ 200, 0, 201, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 202, 0, 203, 0, 204,
+ 0, 203, 0, 204, 0, 202, 0, 203, 0, 204, 0, 203, 0, 204, 0, 163, 111, 0, 0, 98,
+ 0, 106, 0, 205, 0, 205, 0, 8, 0, 8, 0, 162, 0, 163, 0, 0, 0, 206, 0, 0, 0, 8,
+ 0, 8, 0, 162, 0, 163, 0, 0, 0, 207, 0, 0, 208, 0, 208, 0, 81, 0, 209, 0, 208,
+ 0, 208, 0, 208, 0, 208, 0, 208, 0, 208, 0, 208, 0, 208, 0, 0, 210, 211, 212,
+ 211, 212, 0, 213, 116, 214, 116, 214, 215, 0, 216, 0, 111, 0, 111, 0, 111, 0,
+ 111, 0, 217, 0, 116, 218, 11, 0, 11, 0, 118, 219, 208, 0, 208, 0, 0, 8, 0, 220,
+ 0, 206, 172, 0, 0, 0, 0, 221, 0, 207, 0, 8, 0, 8, 0, 162, 0, 163, 222, 0, 0,
+ 220, 0, 8, 0, 8, 0, 223, 0, 223, 11, 0, 11, 0, 11, 0, 11, 0, 0, 8, 0, 8, 0,
+ 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0,
+ 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 164, 0, 8, 224, 0, 45, 0, 225, 0, 225,
+ 0, 40, 0, 226, 0, 0, 8, 0, 190, 0, 227, 0, 227, 0, 8, 0, 8, 0, 8, 0, 8, 0,
+ 130, 0, 131, 0, 8, 0, 8, 0, 8, 0, 8, 0, 132, 0, 133, 0, 227, 0, 227, 0, 227, 0,
+ 227, 0, 227, 0, 227, 0, 180, 0, 180, 172, 0, 172, 0, 172, 0, 172, 0, 0, 180,
+ 0, 180, 0, 180, 0, 180, 0, 180, 0, 180, 11, 0, 11, 0, 0, 185, 0, 185, 0, 185,
+ 0, 185, 0, 228, 0, 228, 0, 8, 0, 8, 0, 8, 0, 185, 0, 8, 0, 8, 0, 185, 0, 185,
+ 0, 190, 0, 190, 0, 229, 0, 229, 0, 229, 0, 8, 0, 229, 0, 8, 0, 185, 0, 185, 0,
+ 185, 0, 185, 0, 185, 0, 8, 11, 0, 11, 0, 11, 0, 11, 0, 0, 134, 0, 52, 0, 135,
+ 0, 230, 99, 198, 99, 197, 99, 199, 99, 198, 7, 0, 7, 0, 7, 0, 0, 8, 7, 0, 0,
+ 8, 7, 0, 7, 0, 7, 0, 7, 0, 7, 0, 7, 0, 0, 8, 7, 0, 7, 0, 137, 0, 7, 0, 0, 8,
+ 7, 0, 0, 8, 0, 8, 7, 0, 0, 231, 0, 163, 0, 162, 0, 232, 11, 0, 11, 0, 0, 233,
+ 0, 233, 0, 233, 0, 233, 0, 233, 0, 233, 0, 233, 0, 233, 0, 233, 0, 233, 0, 233,
+ 0, 233, 0, 185, 0, 185, 0, 8, 0, 8, 0, 205, 0, 205, 0, 8, 0, 8, 0, 8, 0, 8,
+ 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 234, 0,
+ 234, 0, 235, 0, 175, 0, 225, 0, 225, 0, 225, 0, 225, 0, 141, 0, 234, 0, 236,
+ 0, 175, 0, 235, 0, 235, 0, 175, 0, 236, 0, 175, 0, 235, 0, 175, 0, 237, 0, 238,
+ 0, 173, 0, 173, 0, 173, 0, 237, 0, 235, 0, 175, 0, 236, 0, 175, 0, 235, 0,
+ 175, 0, 234, 0, 175, 0, 237, 0, 238, 0, 173, 0, 173, 0, 173, 0, 237, 0, 0, 8,
+ 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 11, 0, 11, 0, 11, 0, 11, 0, 0,
+ 8, 0, 8, 0, 8, 0, 239, 0, 11, 0, 11, 0, 8, 0, 8, 0, 11, 0, 11, 0, 8, 0, 8, 0,
+ 240, 0, 240, 0, 240, 0, 240, 0, 8, 111, 0, 111, 0, 241, 0, 111, 0, 0, 240, 0,
+ 240, 0, 240, 0, 240, 0, 240, 0, 240, 0, 8, 0, 8, 0, 185, 0, 185, 0, 185, 0, 8,
+ 0, 240, 0, 240, 0, 8, 0, 8, 0, 185, 0, 185, 0, 185, 0, 8, 0, 8, 0, 227, 0, 11,
+ 0, 11, 0, 11, 0, 8, 0, 8, 0, 8, 0, 242, 0, 243, 0, 242, 0, 8, 0, 8, 0, 8, 0,
+ 242, 0, 242, 0, 242, 0, 8, 0, 8, 0, 8, 0, 242, 0, 242, 0, 243, 0, 242, 0, 8,
+ 0, 8, 0, 8, 0, 242, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 185, 0,
+ 185, 222, 0, 0, 227, 0, 227, 0, 227, 0, 227, 0, 227, 0, 227, 0, 227, 0, 227,
+ 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8,
+ 0, 200, 0, 201, 11, 0, 11, 0, 0, 200, 0, 201, 181, 0, 181, 0, 0, 200, 0, 201,
+ 11, 0, 0, 201, 0, 11, 0, 11, 0, 200, 0, 201, 0, 11, 0, 11, 0, 200, 0, 201, 0,
+ 11, 0, 11, 0, 200, 0, 201, 11, 0, 11, 0, 0, 200, 0, 201, 181, 0, 181, 0, 0, 200,
+ 0, 201, 11, 0, 0, 201, 0, 8, 0, 8, 0, 162, 0, 163, 111, 0, 111, 0, 0, 24,
+ 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 0, 24, 111, 0, 241, 0, 0, 8, 0, 8, 0,
+ 8, 0, 8, 0, 8, 0, 8, 11, 0, 11, 0, 0, 200, 0, 201, 0, 158, 0, 8, 0, 8, 0, 162,
+ 0, 163, 222, 0, 222, 0, 31, 0, 32, 0, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 102, 0, 102, 0, 244, 0, 0, 245, 0, 0, 0, 246, 0, 0,
+ 0, 0, 150, 0, 0, 2, 0, 4, 0, 4, 0, 0, 247, 0, 247, 0, 247, 0, 247, 0, 248, 0,
+ 248, 0, 248, 0, 248, 0, 248, 0, 248, 0, 248, 0, 248, 0, 244, 0, 0
+};
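+
+// rwInfoIndex stores two bytes per instruction id (hence the _kIdCount * 2
+// bound); each byte is an index into the rwInfo[] table below. Which entry of
+// the pair is selected appears to depend on the operand arrangement being
+// queried - an inference from the layout, not from the generator. The
+// [ref=Nx] annotations in rwInfo[] record how many index slots point at each
+// row, which is why the generic all-zero row #0 dominates with [ref=1609x].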
+
+const InstDB::RWInfo InstDB::rwInfo[] = {
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #0 [ref=1609x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 1 , 0 , 0 , 0 , 0 , 0 } }, // #1 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 1 , { 2 , 3 , 0 , 0 , 0 , 0 } }, // #2 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 2 , { 2 , 3 , 0 , 0 , 0 , 0 } }, // #3 [ref=100x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 4 , 5 , 0 , 0 , 0 , 0 } }, // #4 [ref=69x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 6 , 7 , 0 , 0 , 0 , 0 } }, // #5 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 8 , 9 , 0 , 0 , 0 , 0 } }, // #6 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 10, 5 , 0 , 0 , 0 , 0 } }, // #7 [ref=33x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 11, 3 , 3 , 0 , 0 , 0 } }, // #8 [ref=186x]
+ { InstDB::RWInfo::kCategoryGeneric , 7 , { 12, 13, 0 , 0 , 0 , 0 } }, // #9 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 2 , { 11, 3 , 3 , 0 , 0 , 0 } }, // #10 [ref=5x]
+ { InstDB::RWInfo::kCategoryGeneric , 2 , { 11, 3 , 0 , 0 , 0 , 0 } }, // #11 [ref=80x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 4 , 5 , 14, 0 , 0 , 0 } }, // #12 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 2 , { 5 , 3 , 0 , 0 , 0 , 0 } }, // #13 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 10, 3 , 0 , 0 , 0 , 0 } }, // #14 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 9 , { 10, 5 , 0 , 0 , 0 , 0 } }, // #15 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 11, 5 , 0 , 0 , 0 , 0 } }, // #16 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 3 , 3 , 0 , 0 , 0 , 0 } }, // #17 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 2 , 0 , 0 , 0 , 0 , 0 } }, // #18 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 3 , 3 , 0 , 0 , 0 , 0 } }, // #19 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 2 , 3 , 0 , 0 , 0 , 0 } }, // #20 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 11, { 3 , 0 , 0 , 0 , 0 , 0 } }, // #21 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 15, 16, 0 , 0 , 0 , 0 } }, // #22 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 17, 0 , 0 , 0 , 0 , 0 } }, // #23 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 3 , 0 , 0 , 0 , 0 , 0 } }, // #24 [ref=34x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 18, 0 , 0 , 0 , 0 , 0 } }, // #25 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 1 , { 3 , 3 , 0 , 0 , 0 , 0 } }, // #26 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 19, 20, 0 , 0 , 0 , 0 } }, // #27 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 11, { 2 , 3 , 21, 0 , 0 , 0 } }, // #28 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 4 , 22, 17, 23, 24, 0 } }, // #29 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 25, 26, 27, 28, 29, 0 } }, // #30 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 7 , 7 , 0 , 0 , 0 , 0 } }, // #31 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 9 , 9 , 0 , 0 , 0 , 0 } }, // #32 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 27, 30, 31, 15, 0 , 0 } }, // #33 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 32, 33, 0 , 0 , 0 , 0 } }, // #34 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 14, { 2 , 3 , 0 , 0 , 0 , 0 } }, // #35 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 10, 7 , 0 , 0 , 0 , 0 } }, // #36 [ref=10x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 34, 5 , 0 , 0 , 0 , 0 } }, // #37 [ref=5x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 35, 7 , 0 , 0 , 0 , 0 } }, // #38 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 34, 7 , 0 , 0 , 0 , 0 } }, // #39 [ref=13x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 11, 7 , 0 , 0 , 0 , 0 } }, // #40 [ref=9x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 36, 7 , 0 , 0 , 0 , 0 } }, // #41 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 14, { 35, 3 , 0 , 0 , 0 , 0 } }, // #42 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 14, { 36, 3 , 0 , 0 , 0 , 0 } }, // #43 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 35, 9 , 0 , 0 , 0 , 0 } }, // #44 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 11, 9 , 0 , 0 , 0 , 0 } }, // #45 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 37, 38, 0 , 0 , 0 , 0 } }, // #46 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 27, 0 , 0 , 0 , 0 , 0 } }, // #47 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 2 , 0 , 0 , 0 , 0 , 0 } }, // #48 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 15, { 1 , 39, 0 , 0 , 0 , 0 } }, // #49 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 40, 41, 3 , 0 , 0 , 0 } }, // #50 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 16, { 42, 43, 0 , 0 , 0 , 0 } }, // #51 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 17, { 42, 5 , 0 , 0 , 0 , 0 } }, // #52 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 4 , 5 , 0 , 0 , 0 , 0 } }, // #53 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 4 , 0 , 0 , 0 , 0 , 0 } }, // #54 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 44, 45, 0 , 0 , 0 , 0 } }, // #55 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 18, { 3 , 0 , 0 , 0 , 0 , 0 } }, // #56 [ref=15x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 44, 0 , 0 , 0 , 0 , 0 } }, // #57 [ref=16x]
+ { InstDB::RWInfo::kCategoryGeneric , 19, { 45, 0 , 0 , 0 , 0 , 0 } }, // #58 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 19, { 46, 0 , 0 , 0 , 0 , 0 } }, // #59 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 20, { 3 , 0 , 0 , 0 , 0 , 0 } }, // #60 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 45, 0 , 0 , 0 , 0 , 0 } }, // #61 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 18, { 11, 0 , 0 , 0 , 0 , 0 } }, // #62 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 21, { 13, 0 , 0 , 0 , 0 , 0 } }, // #63 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 11, 0 , 0 , 0 , 0 , 0 } }, // #64 [ref=8x]
+ { InstDB::RWInfo::kCategoryGeneric , 21, { 47, 0 , 0 , 0 , 0 , 0 } }, // #65 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 7 , { 48, 0 , 0 , 0 , 0 , 0 } }, // #66 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 20, { 11, 0 , 0 , 0 , 0 , 0 } }, // #67 [ref=2x]
+ { InstDB::RWInfo::kCategoryImul , 2 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #68 [ref=1x]
+ { InstDB::RWInfo::kCategoryImul , 22, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #69 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 49, 50, 0 , 0 , 0 , 0 } }, // #70 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 51, 50, 0 , 0 , 0 , 0 } }, // #71 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 4 , 9 , 0 , 0 , 0 , 0 } }, // #72 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 3 , 5 , 0 , 0 , 0 , 0 } }, // #73 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 21, 28, 0 , 0 , 0 , 0 } }, // #74 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 52, 0 , 0 , 0 , 0 , 0 } }, // #75 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 53, 39, 39, 0 , 0 , 0 } }, // #76 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 42, 9 , 9 , 0 , 0 , 0 } }, // #77 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 34, 7 , 7 , 0 , 0 , 0 } }, // #78 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 47, 13, 13, 0 , 0 , 0 } }, // #79 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 23, { 53, 39, 0 , 0 , 0 , 0 } }, // #80 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 24, { 42, 9 , 0 , 0 , 0 , 0 } }, // #81 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 25, { 34, 7 , 0 , 0 , 0 , 0 } }, // #82 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 26, { 47, 13, 0 , 0 , 0 , 0 } }, // #83 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 53, 39, 0 , 0 , 0 , 0 } }, // #84 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 42, 9 , 0 , 0 , 0 , 0 } }, // #85 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 34, 7 , 0 , 0 , 0 , 0 } }, // #86 [ref=5x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 47, 13, 0 , 0 , 0 , 0 } }, // #87 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 39, 39, 0 , 0 , 0 , 0 } }, // #88 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 9 , 9 , 0 , 0 , 0 , 0 } }, // #89 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 7 , 7 , 0 , 0 , 0 , 0 } }, // #90 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 13, 13, 0 , 0 , 0 , 0 } }, // #91 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 47, 39, 39, 0 , 0 , 0 } }, // #92 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 34, 9 , 9 , 0 , 0 , 0 } }, // #93 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 42, 13, 13, 0 , 0 , 0 } }, // #94 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 54, 0 , 0 , 0 , 0 , 0 } }, // #95 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 27, { 11, 3 , 0 , 0 , 0 , 0 } }, // #96 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 10, 5 , 0 , 0 , 0 , 0 } }, // #97 [ref=5x]
+ { InstDB::RWInfo::kCategoryGeneric , 28, { 9 , 0 , 0 , 0 , 0 , 0 } }, // #98 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 2 , 3 , 0 , 0 , 0 , 0 } }, // #99 [ref=13x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 11, 3 , 0 , 0 , 0 , 0 } }, // #100 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 7 , { 13, 0 , 0 , 0 , 0 , 0 } }, // #101 [ref=5x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 3 , 0 , 0 , 0 , 0 , 0 } }, // #102 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 49, 19, 0 , 0 , 0 , 0 } }, // #103 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 55, 0 , 0 , 0 , 0 , 0 } }, // #104 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 3 , 9 , 0 , 0 , 0 , 0 } }, // #105 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 5 , 5 , 20, 0 , 0 , 0 } }, // #106 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 7 , 7 , 20, 0 , 0 , 0 } }, // #107 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 18, 28, 56, 0 , 0 , 0 } }, // #108 [ref=2x]
+ { InstDB::RWInfo::kCategoryMov , 29, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #109 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 30, { 10, 5 , 0 , 0 , 0 , 0 } }, // #110 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 11, 3 , 0 , 0 , 0 , 0 } }, // #111 [ref=14x]
+ { InstDB::RWInfo::kCategoryGeneric , 16, { 11, 43, 0 , 0 , 0 , 0 } }, // #112 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 35, 57, 0 , 0 , 0 , 0 } }, // #113 [ref=1x]
+ { InstDB::RWInfo::kCategoryMovh64 , 13, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #114 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 58, 7 , 0 , 0 , 0 , 0 } }, // #115 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 34, 7 , 0 , 0 , 0 , 0 } }, // #116 [ref=7x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 53, 5 , 0 , 0 , 0 , 0 } }, // #117 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 28, { 42, 9 , 0 , 0 , 0 , 0 } }, // #118 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 20, 19, 0 , 0 , 0 , 0 } }, // #119 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 14, { 11, 3 , 0 , 0 , 0 , 0 } }, // #120 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 34, 9 , 0 , 0 , 0 , 0 } }, // #121 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 59, 41, 3 , 0 , 0 , 0 } }, // #122 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 11, 11, 3 , 60, 0 , 0 } }, // #123 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 16, 28, 0 , 0 , 0 , 0 } }, // #124 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 16, 28, 29, 0 , 0 , 0 } }, // #125 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 3 , 0 , 0 , 0 , 0 , 0 } }, // #126 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 50, 21, 0 , 0 , 0 , 0 } }, // #127 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 50, 61, 0 , 0 , 0 , 0 } }, // #128 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 25, 7 , 0 , 0 , 0 , 0 } }, // #129 [ref=18x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 5 , 5 , 0 , 62, 16, 56 } }, // #130 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 5 , 5 , 0 , 63, 16, 56 } }, // #131 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 5 , 5 , 0 , 62, 0 , 0 } }, // #132 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 5 , 5 , 0 , 63, 0 , 0 } }, // #133 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 31, { 53, 5 , 0 , 0 , 0 , 0 } }, // #134 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 32, { 34, 5 , 0 , 0 , 0 , 0 } }, // #135 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 33, { 47, 3 , 0 , 0 , 0 , 0 } }, // #136 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 64, 5 , 0 , 0 , 0 , 0 } }, // #137 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 15, { 4 , 39, 0 , 0 , 0 , 0 } }, // #138 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 4 , { 4 , 7 , 0 , 0 , 0 , 0 } }, // #139 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 27, { 2 , 13, 0 , 0 , 0 , 0 } }, // #140 [ref=1x]
+ { InstDB::RWInfo::kCategoryVmov1_8 , 0 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #141 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 10, 9 , 0 , 0 , 0 , 0 } }, // #142 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 27, { 10, 13, 0 , 0 , 0 , 0 } }, // #143 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 65, 0 , 0 , 0 , 0 , 0 } }, // #144 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 3 , { 5 , 5 , 0 , 0 , 0 , 0 } }, // #145 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 60, 0 , 0 , 0 , 0 , 0 } }, // #146 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 10, { 2 , 66, 0 , 0 , 0 , 0 } }, // #147 [ref=8x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 36, 9 , 0 , 0 , 0 , 0 } }, // #148 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 11, 0 , 0 , 0 , 0 , 0 } }, // #149 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 15, 67, 28, 0 , 0 , 0 } }, // #150 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 15, 67, 0 , 0 , 0 , 0 } }, // #151 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 15, 67, 62, 0 , 0 , 0 } }, // #152 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 68, 0 , 0 , 0 , 0 , 0 } }, // #153 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 21, 20, 0 , 0 , 0 , 0 } }, // #154 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 31, { 69, 0 , 0 , 0 , 0 , 0 } }, // #155 [ref=30x]
+ { InstDB::RWInfo::kCategoryGeneric , 11, { 2 , 3 , 66, 0 , 0 , 0 } }, // #156 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 34, { 11, 0 , 0 , 0 , 0 , 0 } }, // #157 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 28, { 42, 0 , 0 , 0 , 0 , 0 } }, // #158 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 20, 21, 0 , 0 , 0 , 0 } }, // #159 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 70, 43, 43, 43, 43, 5 } }, // #160 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 4 , 5 , 5 , 5 , 5 , 5 } }, // #161 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 10, 5 , 7 , 0 , 0 , 0 } }, // #162 [ref=8x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 10, 5 , 9 , 0 , 0 , 0 } }, // #163 [ref=9x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 11, 3 , 3 , 3 , 0 , 0 } }, // #164 [ref=3x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 71, 5 , 0 , 0 , 0 , 0 } }, // #165 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 11, 5 , 0 , 0 , 0 , 0 } }, // #166 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 37, { 72, 73, 0 , 0 , 0 , 0 } }, // #167 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 38, { 11, 7 , 0 , 0 , 0 , 0 } }, // #168 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 39, { 11, 9 , 0 , 0 , 0 , 0 } }, // #169 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 11, 5 , 7 , 0 , 0 , 0 } }, // #170 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 11, 5 , 9 , 0 , 0 , 0 } }, // #171 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 11, { 11, 3 , 0 , 0 , 0 , 0 } }, // #172 [ref=7x]
+ { InstDB::RWInfo::kCategoryVmov2_1 , 40, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #173 [ref=14x]
+ { InstDB::RWInfo::kCategoryVmov1_2 , 14, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #174 [ref=7x]
+ { InstDB::RWInfo::kCategoryVmov1_2 , 41, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #175 [ref=10x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 10, 74, 7 , 0 , 0 , 0 } }, // #176 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 42, { 10, 57, 3 , 0 , 0 , 0 } }, // #177 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 42, { 10, 74, 3 , 0 , 0 , 0 } }, // #178 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 10, 57, 9 , 0 , 0 , 0 } }, // #179 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 43, { 10, 5 , 5 , 0 , 0 , 0 } }, // #180 [ref=9x]
+ { InstDB::RWInfo::kCategoryGeneric , 44, { 72, 43, 0 , 0 , 0 , 0 } }, // #181 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 45, { 10, 73, 0 , 0 , 0 , 0 } }, // #182 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 45, { 10, 3 , 0 , 0 , 0 , 0 } }, // #183 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 46, { 71, 43, 0 , 0 , 0 , 0 } }, // #184 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 2 , 3 , 3 , 0 , 0 , 0 } }, // #185 [ref=60x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 4 , 57, 7 , 0 , 0 , 0 } }, // #186 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 4 , 74, 9 , 0 , 0 , 0 } }, // #187 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 6 , 7 , 7 , 0 , 0 , 0 } }, // #188 [ref=11x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 8 , 9 , 9 , 0 , 0 , 0 } }, // #189 [ref=11x]
+ { InstDB::RWInfo::kCategoryGeneric , 47, { 11, 3 , 3 , 3 , 0 , 0 } }, // #190 [ref=15x]
+ { InstDB::RWInfo::kCategoryGeneric , 48, { 34, 7 , 7 , 7 , 0 , 0 } }, // #191 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 49, { 42, 9 , 9 , 9 , 0 , 0 } }, // #192 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 25, 7 , 7 , 0 , 0 , 0 } }, // #193 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 75, 9 , 9 , 0 , 0 , 0 } }, // #194 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 14, { 34, 3 , 0 , 0 , 0 , 0 } }, // #195 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 5 , { 42, 9 , 0 , 0 , 0 , 0 } }, // #196 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 2 , 3 , 2 , 0 , 0 , 0 } }, // #197 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 2 , 3 , 2 , 0 , 0 , 0 } }, // #198 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 18, { 4 , 3 , 4 , 0 , 0 , 0 } }, // #199 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 35, { 10, 57, 7 , 0 , 0 , 0 } }, // #200 [ref=11x]
+ { InstDB::RWInfo::kCategoryGeneric , 36, { 10, 74, 9 , 0 , 0 , 0 } }, // #201 [ref=13x]
+ { InstDB::RWInfo::kCategoryGeneric , 43, { 71, 73, 5 , 0 , 0 , 0 } }, // #202 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 43, { 11, 3 , 5 , 0 , 0 , 0 } }, // #203 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 50, { 72, 43, 73, 0 , 0 , 0 } }, // #204 [ref=4x]
+ { InstDB::RWInfo::kCategoryVmaskmov , 0 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #205 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 34, 0 , 0 , 0 , 0 , 0 } }, // #206 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 21, 0 , 0 , 0 , 0 , 0 } }, // #207 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 51, { 11, 3 , 0 , 0 , 0 , 0 } }, // #208 [ref=12x]
+ { InstDB::RWInfo::kCategoryVmovddup , 52, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #209 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 10, 57, 57, 0 , 0 , 0 } }, // #210 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 34, 57, 0 , 0 , 0 , 0 } }, // #211 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 10, 7 , 7 , 0 , 0 , 0 } }, // #212 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 10, 7 , 7 , 0 , 0 , 0 } }, // #213 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 10, 57, 7 , 0 , 0 , 0 } }, // #214 [ref=2x]
+ { InstDB::RWInfo::kCategoryVmovmskpd , 0 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #215 [ref=1x]
+ { InstDB::RWInfo::kCategoryVmovmskps , 0 , { 0 , 0 , 0 , 0 , 0 , 0 } }, // #216 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 53, { 34, 7 , 0 , 0 , 0 , 0 } }, // #217 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 10, 57, 7 , 0 , 0 , 0 } }, // #218 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 10, 74, 9 , 0 , 0 , 0 } }, // #219 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 13, { 7 , 0 , 0 , 0 , 0 , 0 } }, // #220 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 76, 0 , 0 , 0 , 0 , 0 } }, // #221 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 2 , { 3 , 3 , 0 , 0 , 0 , 0 } }, // #222 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 12, { 72, 43, 43, 43, 43, 5 } }, // #223 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 15, { 11, 39, 0 , 0 , 0 , 0 } }, // #224 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 11, 7 , 0 , 0 , 0 , 0 } }, // #225 [ref=6x]
+ { InstDB::RWInfo::kCategoryGeneric , 27, { 11, 13, 0 , 0 , 0 , 0 } }, // #226 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 6 , { 34, 3 , 3 , 0 , 0 , 0 } }, // #227 [ref=17x]
+ { InstDB::RWInfo::kCategoryGeneric , 50, { 71, 73, 73, 0 , 0 , 0 } }, // #228 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 22, { 11, 3 , 3 , 0 , 0 , 0 } }, // #229 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 7 , { 47, 5 , 0 , 0 , 0 , 0 } }, // #230 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 54, { 10, 5 , 39, 0 , 0 , 0 } }, // #231 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 55, { 10, 5 , 13, 0 , 0 , 0 } }, // #232 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 43, { 10, 5 , 5 , 5 , 0 , 0 } }, // #233 [ref=12x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 34, 3 , 0 , 0 , 0 , 0 } }, // #234 [ref=4x]
+ { InstDB::RWInfo::kCategoryVmov1_4 , 56, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #235 [ref=6x]
+ { InstDB::RWInfo::kCategoryVmov1_8 , 57, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #236 [ref=3x]
+ { InstDB::RWInfo::kCategoryVmov4_1 , 58, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #237 [ref=4x]
+ { InstDB::RWInfo::kCategoryVmov8_1 , 59, { 0 , 0 , 0 , 0 , 0 , 0 } }, // #238 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 60, { 10, 5 , 5 , 5 , 0 , 0 } }, // #239 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 61, { 10, 5 , 5 , 0 , 0 , 0 } }, // #240 [ref=12x]
+ { InstDB::RWInfo::kCategoryGeneric , 18, { 11, 3 , 0 , 0 , 0 , 0 } }, // #241 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 22, { 11, 3 , 5 , 0 , 0 , 0 } }, // #242 [ref=9x]
+ { InstDB::RWInfo::kCategoryGeneric , 62, { 11, 3 , 0 , 0 , 0 , 0 } }, // #243 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 0 , { 56, 16, 28, 0 , 0 , 0 } }, // #244 [ref=2x]
+ { InstDB::RWInfo::kCategoryGeneric , 11, { 2 , 2 , 0 , 0 , 0 , 0 } }, // #245 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 51, { 2 , 2 , 0 , 0 , 0 , 0 } }, // #246 [ref=1x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 3 , 56, 16, 0 , 0 , 0 } }, // #247 [ref=4x]
+ { InstDB::RWInfo::kCategoryGeneric , 8 , { 11, 56, 16, 0 , 0 , 0 } } // #248 [ref=8x]
+};
+
+const InstDB::RWInfoOp InstDB::rwInfoOp[] = {
+ { 0x0000000000000000u, 0x0000000000000000u, 0xFF, { 0 }, 0 }, // #0 [ref=14957x]
+ { 0x0000000000000003u, 0x0000000000000003u, 0x00, { 0 }, OpRWInfo::kRW | OpRWInfo::kRegPhysId }, // #1 [ref=10x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #2 [ref=217x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #3 [ref=978x]
+ { 0x000000000000FFFFu, 0x000000000000FFFFu, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #4 [ref=92x]
+ { 0x000000000000FFFFu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #5 [ref=305x]
+ { 0x00000000000000FFu, 0x00000000000000FFu, 0xFF, { 0 }, OpRWInfo::kRW }, // #6 [ref=18x]
+ { 0x00000000000000FFu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #7 [ref=181x]
+ { 0x000000000000000Fu, 0x000000000000000Fu, 0xFF, { 0 }, OpRWInfo::kRW }, // #8 [ref=18x]
+ { 0x000000000000000Fu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #9 [ref=130x]
+ { 0x0000000000000000u, 0x000000000000FFFFu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #10 [ref=160x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #11 [ref=415x]
+ { 0x0000000000000003u, 0x0000000000000003u, 0xFF, { 0 }, OpRWInfo::kRW }, // #12 [ref=1x]
+ { 0x0000000000000003u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #13 [ref=34x]
+ { 0x000000000000FFFFu, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #14 [ref=4x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0x02, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #15 [ref=7x]
+ { 0x000000000000000Fu, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #16 [ref=21x]
+ { 0x00000000000000FFu, 0x00000000000000FFu, 0x00, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #17 [ref=2x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kMemPhysId }, // #18 [ref=3x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x06, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kMemPhysId }, // #19 [ref=3x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x07, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kMemPhysId }, // #20 [ref=7x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #21 [ref=7x]
+ { 0x00000000000000FFu, 0x00000000000000FFu, 0x02, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #22 [ref=1x]
+ { 0x00000000000000FFu, 0x0000000000000000u, 0x01, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #23 [ref=1x]
+ { 0x00000000000000FFu, 0x0000000000000000u, 0x03, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #24 [ref=1x]
+ { 0x00000000000000FFu, 0x00000000000000FFu, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #25 [ref=20x]
+ { 0x000000000000000Fu, 0x000000000000000Fu, 0x02, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #26 [ref=1x]
+ { 0x000000000000000Fu, 0x000000000000000Fu, 0x00, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #27 [ref=4x]
+ { 0x000000000000000Fu, 0x0000000000000000u, 0x01, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #28 [ref=11x]
+ { 0x000000000000000Fu, 0x0000000000000000u, 0x03, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #29 [ref=2x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0x03, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #30 [ref=1x]
+ { 0x000000000000000Fu, 0x000000000000000Fu, 0x01, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #31 [ref=1x]
+ { 0x0000000000000000u, 0x00000000000000FFu, 0x02, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #32 [ref=1x]
+ { 0x00000000000000FFu, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #33 [ref=1x]
+ { 0x0000000000000000u, 0x00000000000000FFu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #34 [ref=76x]
+ { 0x0000000000000000u, 0x00000000000000FFu, 0xFF, { 0 }, OpRWInfo::kWrite }, // #35 [ref=6x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0xFF, { 0 }, OpRWInfo::kWrite }, // #36 [ref=6x]
+ { 0x0000000000000000u, 0x0000000000000003u, 0x02, { 0 }, OpRWInfo::kWrite | OpRWInfo::kRegPhysId }, // #37 [ref=1x]
+ { 0x0000000000000003u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #38 [ref=1x]
+ { 0x0000000000000001u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #39 [ref=28x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x02, { 0 }, OpRWInfo::kRW | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #40 [ref=2x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRW | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #41 [ref=3x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #42 [ref=29x]
+ { 0xFFFFFFFFFFFFFFFFu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #43 [ref=33x]
+ { 0x00000000000003FFu, 0x00000000000003FFu, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #44 [ref=22x]
+ { 0x00000000000003FFu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #45 [ref=13x]
+ { 0x0000000000000000u, 0x00000000000003FFu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #46 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000003u, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #47 [ref=15x]
+ { 0x0000000000000000u, 0x0000000000000003u, 0x00, { 0 }, OpRWInfo::kWrite | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #48 [ref=2x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kWrite | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #49 [ref=2x]
+ { 0x0000000000000003u, 0x0000000000000000u, 0x02, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #50 [ref=4x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x07, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kMemPhysId }, // #51 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x01, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #52 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000001u, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #53 [ref=14x]
+ { 0x0000000000000000u, 0x0000000000000001u, 0x00, { 0 }, OpRWInfo::kWrite | OpRWInfo::kRegPhysId }, // #54 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x01, { 0 }, OpRWInfo::kRW | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #55 [ref=3x]
+ { 0x000000000000000Fu, 0x0000000000000000u, 0x02, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #56 [ref=20x]
+ { 0x000000000000FF00u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #57 [ref=23x]
+ { 0x0000000000000000u, 0x000000000000FF00u, 0xFF, { 0 }, OpRWInfo::kWrite }, // #58 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x02, { 0 }, OpRWInfo::kWrite | OpRWInfo::kRegPhysId | OpRWInfo::kZExt }, // #59 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x02, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #60 [ref=2x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x06, { 0 }, OpRWInfo::kRead | OpRWInfo::kMemPhysId }, // #61 [ref=1x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0x01, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #62 [ref=5x]
+ { 0x0000000000000000u, 0x000000000000FFFFu, 0x00, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #63 [ref=4x]
+ { 0x0000000000000000u, 0x0000000000000007u, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #64 [ref=2x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x04, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #65 [ref=1x]
+ { 0x0000000000000001u, 0x0000000000000000u, 0x01, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #66 [ref=10x]
+ { 0x0000000000000000u, 0x000000000000000Fu, 0x00, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt | OpRWInfo::kRegPhysId }, // #67 [ref=5x]
+ { 0x0000000000000001u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRead | OpRWInfo::kRegPhysId }, // #68 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000001u, 0xFF, { 0 }, OpRWInfo::kWrite }, // #69 [ref=30x]
+ { 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #70 [ref=2x]
+ { 0x0000000000000000u, 0x00000000FFFFFFFFu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #71 [ref=10x]
+ { 0x0000000000000000u, 0xFFFFFFFFFFFFFFFFu, 0xFF, { 0 }, OpRWInfo::kWrite | OpRWInfo::kZExt }, // #72 [ref=16x]
+ { 0x00000000FFFFFFFFu, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #73 [ref=16x]
+ { 0x000000000000FFF0u, 0x0000000000000000u, 0xFF, { 0 }, OpRWInfo::kRead }, // #74 [ref=18x]
+ { 0x000000000000000Fu, 0x000000000000000Fu, 0xFF, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt }, // #75 [ref=1x]
+ { 0x0000000000000000u, 0x0000000000000000u, 0x00, { 0 }, OpRWInfo::kRW | OpRWInfo::kZExt | OpRWInfo::kRegPhysId } // #76 [ref=1x]
+};
+
+const InstDB::RWInfoRm InstDB::rwInfoRm[] = {
+ { InstDB::RWInfoRm::kCategoryNone , 0x00, 0 , 0, 0 }, // #0 [ref=1809x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x03, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #1 [ref=8x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x02, 0 , 0, 0 }, // #2 [ref=193x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 16, 0, 0 }, // #3 [ref=122x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 8 , 0, 0 }, // #4 [ref=66x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 4 , 0, 0 }, // #5 [ref=34x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x04, 0 , 0, 0 }, // #6 [ref=270x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 2 , 0, 0 }, // #7 [ref=9x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 0 , 0, 0 }, // #8 [ref=60x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 0 , 0, 0 }, // #9 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x01, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #10 [ref=20x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x01, 0 , 0, 0 }, // #11 [ref=13x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 16, 0, 0 }, // #12 [ref=21x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 8 , 0, 0 }, // #13 [ref=20x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x02, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #14 [ref=15x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 1 , 0, 0 }, // #15 [ref=5x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 64, 0, 0 }, // #16 [ref=3x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 4 , 0, 0 }, // #17 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryNone , 0x00, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #18 [ref=22x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 10, 0, 0 }, // #19 [ref=2x]
+ { InstDB::RWInfoRm::kCategoryNone , 0x01, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #20 [ref=5x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 2 , 0, 0 }, // #21 [ref=3x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x06, 0 , 0, 0 }, // #22 [ref=14x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 1 , 0, 0 }, // #23 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 4 , 0, 0 }, // #24 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 8 , 0, 0 }, // #25 [ref=3x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 2 , 0, 0 }, // #26 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 2 , 0, 0 }, // #27 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 4 , 0, 0 }, // #28 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryNone , 0x03, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #29 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 16, 0, 0 }, // #30 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 1 , 0, 0 }, // #31 [ref=32x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 8 , 0, 0 }, // #32 [ref=2x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 2 , 0, Features::kSSE4_1 }, // #33 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 2 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #34 [ref=3x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 8 , 0, 0 }, // #35 [ref=34x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 4 , 0, 0 }, // #36 [ref=37x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x00, 32, 0, 0 }, // #37 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 8 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #38 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 4 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #39 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryHalf , 0x02, 0 , 0, 0 }, // #40 [ref=14x]
+ { InstDB::RWInfoRm::kCategoryHalf , 0x01, 0 , 0, 0 }, // #41 [ref=10x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x04, 0 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #42 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 16, 0, 0 }, // #43 [ref=27x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x02, 64, 0, 0 }, // #44 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 16, 0, 0 }, // #45 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x01, 32, 0, 0 }, // #46 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x0C, 0 , 0, 0 }, // #47 [ref=15x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x0C, 8 , 0, 0 }, // #48 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x0C, 4 , 0, 0 }, // #49 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 32, 0, 0 }, // #50 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x03, 0 , 0, 0 }, // #51 [ref=13x]
+ { InstDB::RWInfoRm::kCategoryNone , 0x02, 0 , 0, 0 }, // #52 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x03, 8 , InstDB::RWInfoRm::kFlagAmbiguous, 0 }, // #53 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 1 , 0, 0 }, // #54 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x04, 2 , 0, 0 }, // #55 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryQuarter , 0x01, 0 , 0, 0 }, // #56 [ref=6x]
+ { InstDB::RWInfoRm::kCategoryEighth , 0x01, 0 , 0, 0 }, // #57 [ref=3x]
+ { InstDB::RWInfoRm::kCategoryQuarter , 0x02, 0 , 0, 0 }, // #58 [ref=4x]
+ { InstDB::RWInfoRm::kCategoryEighth , 0x02, 0 , 0, 0 }, // #59 [ref=2x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x0C, 16, 0, 0 }, // #60 [ref=1x]
+ { InstDB::RWInfoRm::kCategoryFixed , 0x06, 16, 0, 0 }, // #61 [ref=12x]
+ { InstDB::RWInfoRm::kCategoryConsistent, 0x02, 0 , 0, Features::kAVX512_BW } // #62 [ref=2x]
+};
+// ----------------------------------------------------------------------------
+// ${InstRWInfoTable:End}
+
+// ============================================================================
+// [asmjit::x86::InstDB - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(x86_inst_db) {
+ INFO("Checking validity of Inst enums");
+
+ // Cross-validate prefixes.
+ EXPECT(Inst::kOptionRex == 0x40000000u, "REX prefix must be at 0x40000000");
+ EXPECT(Inst::kOptionVex3 == 0x00000400u, "VEX3 prefix must be at 0x00000400");
+ EXPECT(Inst::kOptionEvex == 0x00001000u, "EVEX prefix must be at 0x00001000");
+
+ // These can be combined to form a valid REX prefix, so they must match.
+ EXPECT(uint32_t(Inst::kOptionOpCodeB) == uint32_t(Opcode::kB), "Opcode::kB must match Inst::kOptionOpCodeB");
+ EXPECT(uint32_t(Inst::kOptionOpCodeX) == uint32_t(Opcode::kX), "Opcode::kX must match Inst::kOptionOpCodeX");
+ EXPECT(uint32_t(Inst::kOptionOpCodeR) == uint32_t(Opcode::kR), "Opcode::kR must match Inst::kOptionOpCodeR");
+ EXPECT(uint32_t(Inst::kOptionOpCodeW) == uint32_t(Opcode::kW), "Opcode::kW must match Inst::kOptionOpCodeW");
+
+ uint32_t rex_rb = (Opcode::kR >> Opcode::kREX_Shift) | (Opcode::kB >> Opcode::kREX_Shift) | 0x40;
+ uint32_t rex_rw = (Opcode::kR >> Opcode::kREX_Shift) | (Opcode::kW >> Opcode::kREX_Shift) | 0x40;
+
+ EXPECT(rex_rb == 0x45, "Opcode::kR|B must form a valid REX prefix (0x45) if combined with 0x40");
+ EXPECT(rex_rw == 0x4C, "Opcode::kR|W must form a valid REX prefix (0x4C) if combined with 0x40");
+}
+#endif
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86instdb.h b/3rdparty/asmjit/src/asmjit/x86/x86instdb.h
new file mode 100644
index 00000000000..d03b87035c3
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86instdb.h
@@ -0,0 +1,471 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86INSTDB_H_INCLUDED
+#define ASMJIT_X86_X86INSTDB_H_INCLUDED
+
+#include "../x86/x86globals.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+//! Instruction database (X86).
+namespace InstDB {
+
+// ============================================================================
+// [asmjit::x86::InstDB::Mode]
+// ============================================================================
+
+//! Describes which mode is supported by an instruction or instruction signature.
+enum Mode : uint32_t {
+ kModeNone = 0x00u, //!< Invalid.
+ kModeX86 = 0x01u, //!< X86 mode supported.
+ kModeX64 = 0x02u, //!< X64 mode supported.
+ kModeAny = 0x03u //!< Both X86 and X64 modes supported.
+};
+
+static constexpr uint32_t modeFromArchId(uint32_t archId) noexcept {
+ return archId == ArchInfo::kIdX86 ? kModeX86 :
+ archId == ArchInfo::kIdX64 ? kModeX64 : kModeNone;
+}
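+
+// Illustrative sketch (not part of the library): the returned `Mode` bit can
+// be tested against an `InstSignature::modes` mask to see whether a signature
+// is usable in the current mode:
+//
+//   uint32_t mode = InstDB::modeFromArchId(ArchInfo::kIdX64);
+//   bool supported = (signature.modes & mode) != 0; // `signature` is an InstSignature.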
+
+// ============================================================================
+// [asmjit::x86::InstDB::OpFlags]
+// ============================================================================
+
+//! Operand flags (X86).
+enum OpFlags : uint32_t {
+ kOpNone = 0x00000000u, //!< No flags.
+
+ kOpGpbLo = 0x00000001u, //!< Operand can be low 8-bit GPB register.
+ kOpGpbHi = 0x00000002u, //!< Operand can be high 8-bit GPB register.
+ kOpGpw = 0x00000004u, //!< Operand can be 16-bit GPW register.
+ kOpGpd = 0x00000008u, //!< Operand can be 32-bit GPD register.
+ kOpGpq = 0x00000010u, //!< Operand can be 64-bit GPQ register.
+ kOpXmm = 0x00000020u, //!< Operand can be 128-bit XMM register.
+ kOpYmm = 0x00000040u, //!< Operand can be 256-bit YMM register.
+ kOpZmm = 0x00000080u, //!< Operand can be 512-bit ZMM register.
+ kOpMm = 0x00000100u, //!< Operand can be 64-bit MM register.
+ kOpKReg = 0x00000200u, //!< Operand can be 64-bit K register.
+ kOpSReg = 0x00000400u, //!< Operand can be SReg (segment register).
+ kOpCReg = 0x00000800u, //!< Operand can be CReg (control register).
+ kOpDReg = 0x00001000u, //!< Operand can be DReg (debug register).
+ kOpSt = 0x00002000u, //!< Operand can be 80-bit ST register (X87).
+ kOpBnd = 0x00004000u, //!< Operand can be 128-bit BND register.
+ kOpAllRegs = 0x00007FFFu, //!< Combination of all possible registers.
+
+ kOpI4 = 0x00010000u, //!< Operand can be unsigned 4-bit immediate.
+ kOpU4 = 0x00020000u, //!< Operand can be unsigned 4-bit immediate.
+ kOpI8 = 0x00040000u, //!< Operand can be signed 8-bit immediate.
+ kOpU8 = 0x00080000u, //!< Operand can be unsigned 8-bit immediate.
+ kOpI16 = 0x00100000u, //!< Operand can be signed 16-bit immediate.
+ kOpU16 = 0x00200000u, //!< Operand can be unsigned 16-bit immediate.
+ kOpI32 = 0x00400000u, //!< Operand can be signed 32-bit immediate.
+ kOpU32 = 0x00800000u, //!< Operand can be unsigned 32-bit immediate.
+ kOpI64 = 0x01000000u, //!< Operand can be signed 64-bit immediate.
+ kOpU64 = 0x02000000u, //!< Operand can be unsigned 64-bit immediate.
+ kOpAllImm = 0x03FF0000u, //!< Operand can be any immediate.
+
+ kOpMem = 0x04000000u, //!< Operand can be a scalar memory pointer.
+ kOpVm = 0x08000000u, //!< Operand can be a vector memory pointer.
+
+ kOpRel8 = 0x10000000u, //!< Operand can be relative 8-bit displacement.
+ kOpRel32 = 0x20000000u, //!< Operand can be relative 32-bit displacement.
+
+ kOpImplicit = 0x80000000u //!< Operand is implicit.
+};
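+
+// Illustrative sketch (not part of the library): operand flags are plain bit
+// masks, so testing whether an operand may be a 32-bit GP register or a
+// scalar memory pointer is a single mask test:
+//
+//   bool gpdOrMem = (sig.opFlags & (InstDB::kOpGpd | InstDB::kOpMem)) != 0; // `sig` is an OpSignature.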
+
+// ============================================================================
+// [asmjit::x86::InstDB::MemFlags]
+// ============================================================================
+
+//! Memory operand flags (X86).
+enum MemFlags : uint32_t {
+ // NOTE: An instruction uses either scalar or vector memory operands; they
+ // never collide. This allows us to share bits between the "M" and "Vm" enums.
+
+ kMemOpAny = 0x0001u, //!< Operand can be any scalar memory pointer.
+ kMemOpM8 = 0x0002u, //!< Operand can be an 8-bit memory pointer.
+ kMemOpM16 = 0x0004u, //!< Operand can be a 16-bit memory pointer.
+ kMemOpM32 = 0x0008u, //!< Operand can be a 32-bit memory pointer.
+ kMemOpM48 = 0x0010u, //!< Operand can be a 48-bit memory pointer (FAR pointers only).
+ kMemOpM64 = 0x0020u, //!< Operand can be a 64-bit memory pointer.
+ kMemOpM80 = 0x0040u, //!< Operand can be an 80-bit memory pointer.
+ kMemOpM128 = 0x0080u, //!< Operand can be a 128-bit memory pointer.
+ kMemOpM256 = 0x0100u, //!< Operand can be a 256-bit memory pointer.
+ kMemOpM512 = 0x0200u, //!< Operand can be a 512-bit memory pointer.
+ kMemOpM1024 = 0x0400u, //!< Operand can be a 1024-bit memory pointer.
+
+ kMemOpVm32x = 0x0002u, //!< Operand can be a vm32x (vector) pointer.
+ kMemOpVm32y = 0x0004u, //!< Operand can be a vm32y (vector) pointer.
+ kMemOpVm32z = 0x0008u, //!< Operand can be a vm32z (vector) pointer.
+ kMemOpVm64x = 0x0020u, //!< Operand can be a vm64x (vector) pointer.
+ kMemOpVm64y = 0x0040u, //!< Operand can be a vm64y (vector) pointer.
+ kMemOpVm64z = 0x0080u, //!< Operand can be a vm64z (vector) pointer.
+
+ kMemOpBaseOnly = 0x0800u, //!< Only memory base is allowed (no index, no offset).
+ kMemOpDs = 0x1000u, //!< Implicit memory operand's DS segment.
+ kMemOpEs = 0x2000u, //!< Implicit memory operand's ES segment.
+
+ kMemOpMib = 0x4000u //!< Operand must be MIB (base+index) pointer.
+};
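+
+// Because scalar "M" and vector "Vm" flags never appear on the same operand
+// (see the NOTE above), they intentionally reuse the same bit values:
+//
+//   static_assert(InstDB::kMemOpM8 == InstDB::kMemOpVm32x, "shared bit"); // both 0x0002u.
+//   static_assert(InstDB::kMemOpM64 == InstDB::kMemOpVm64x, "shared bit"); // both 0x0020u.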
+
+// ============================================================================
+// [asmjit::x86::InstDB::Flags]
+// ============================================================================
+
+//! Instruction flags (X86).
+//!
+//! Details about instruction encoding, operation, features, and some limitations.
+enum Flags : uint32_t {
+ kFlagNone = 0x00000000u, //!< No flags.
+
+ // TODO: Deprecated
+ // ----------------
+
+ kFlagVolatile = 0x00000040u,
+ kFlagPrivileged = 0x00000080u, //!< This is a privileged operation that cannot run in user mode.
+
+ // Instruction Family
+ // ------------------
+ //
+ // Instruction family information.
+
+ kFlagFpu = 0x00000100u, //!< Instruction that accesses FPU registers.
+ kFlagMmx = 0x00000200u, //!< Instruction that accesses MMX registers (including 3DNOW and GEODE) and EMMS.
+ kFlagVec = 0x00000400u, //!< Instruction that accesses XMM registers (SSE, AVX, AVX512).
+
+ // Prefixes and Encoding Flags
+ // ---------------------------
+ //
+ // These describe optional X86 prefixes that can be used to change the instruction's operation.
+
+ kFlagRep = 0x00001000u, //!< Instruction can be prefixed with the REP (REPE) or REPNE prefix.
+ kFlagRepIgnored = 0x00002000u, //!< Instruction ignores REP|REPNE prefixes, but they are accepted.
+ kFlagLock = 0x00004000u, //!< Instruction can be prefixed with the LOCK prefix.
+ kFlagXAcquire = 0x00008000u, //!< Instruction can be prefixed with the XACQUIRE prefix.
+ kFlagXRelease = 0x00010000u, //!< Instruction can be prefixed with the XRELEASE prefix.
+ kFlagMib = 0x00020000u, //!< Instruction uses MIB (BNDLDX|BNDSTX) to encode two registers.
+ kFlagVsib = 0x00040000u, //!< Instruction uses VSIB instead of legacy SIB.
+ kFlagVex = 0x00080000u, //!< Instruction can be encoded by VEX|XOP (AVX|AVX2|BMI|XOP|...).
+ kFlagEvex = 0x00100000u, //!< Instruction can be encoded by EVEX (AVX512).
+
+ // FPU Flags
+ // ---------
+ //
+ // Used to tell the encoder which memory operand sizes are encodable.
+
+ kFlagFpuM16 = 0x00200000u, //!< FPU instruction can address `word_ptr` (shared with M80).
+ kFlagFpuM32 = 0x00400000u, //!< FPU instruction can address `dword_ptr`.
+ kFlagFpuM64 = 0x00800000u, //!< FPU instruction can address `qword_ptr`.
+ kFlagFpuM80 = 0x00200000u, //!< FPU instruction can address `tword_ptr` (shared with M16).
+
+ // AVX and AVX512 Flags
+ // --------------------
+ //
+ // If both `kFlagVex` and `kFlagEvex` flags are specified it means that the
+ // instruction can be encoded by either VEX or EVEX prefix. In that case
+ // AsmJit checks global options and also instruction options to decide
+ // whether to emit VEX or EVEX prefix.
+
+ kFlagAvx512_ = 0x00000000u, //!< Internally used in tables, has no meaning.
+ kFlagAvx512K = 0x01000000u, //!< Supports masking {k1..k7}.
+ kFlagAvx512Z = 0x02000000u, //!< Supports zeroing {z}, must be used together with `kFlagAvx512K`.
+ kFlagAvx512ER = 0x04000000u, //!< Supports 'embedded-rounding' {er} with implicit {sae}.
+ kFlagAvx512SAE = 0x08000000u, //!< Supports 'suppress-all-exceptions' {sae}.
+ kFlagAvx512B32 = 0x10000000u, //!< Supports 32-bit broadcast 'b32'.
+ kFlagAvx512B64 = 0x20000000u, //!< Supports 64-bit broadcast 'b64'.
+ kFlagAvx512T4X = 0x80000000u, //!< Operates on a vector of consecutive registers (AVX512_4FMAPS and AVX512_4VNNIW).
+
+ // Combinations used by instruction tables to make AVX512 definitions more compact.
+ kFlagAvx512KZ = kFlagAvx512K | kFlagAvx512Z,
+ kFlagAvx512ER_SAE = kFlagAvx512ER | kFlagAvx512SAE,
+ kFlagAvx512KZ_SAE = kFlagAvx512KZ | kFlagAvx512SAE,
+ kFlagAvx512KZ_SAE_B32 = kFlagAvx512KZ_SAE | kFlagAvx512B32,
+ kFlagAvx512KZ_SAE_B64 = kFlagAvx512KZ_SAE | kFlagAvx512B64,
+
+ kFlagAvx512KZ_ER_SAE = kFlagAvx512KZ | kFlagAvx512ER_SAE,
+ kFlagAvx512KZ_ER_SAE_B32 = kFlagAvx512KZ_ER_SAE | kFlagAvx512B32,
+ kFlagAvx512KZ_ER_SAE_B64 = kFlagAvx512KZ_ER_SAE | kFlagAvx512B64,
+
+ kFlagAvx512K_B32 = kFlagAvx512K | kFlagAvx512B32,
+ kFlagAvx512K_B64 = kFlagAvx512K | kFlagAvx512B64,
+ kFlagAvx512KZ_B32 = kFlagAvx512KZ | kFlagAvx512B32,
+ kFlagAvx512KZ_B64 = kFlagAvx512KZ | kFlagAvx512B64
+};
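+
+// Illustrative sketch (not part of the library): an instruction that sets both
+// kFlagVex and kFlagEvex can be encoded either way, which can be tested as:
+//
+//   uint32_t vexEvex = InstDB::kFlagVex | InstDB::kFlagEvex;
+//   bool vexOrEvex = (flags & vexEvex) != 0; // At least one encoding available.
+//   bool bothEncodable = (flags & vexEvex) == vexEvex; // VEX and EVEX both encodable.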
+
+// ============================================================================
+// [asmjit::x86::InstDB::SingleRegCase]
+// ============================================================================
+
+enum SingleRegCase : uint32_t {
+ //! No special handling.
+ kSingleRegNone = 0,
+ //! Operands become read-only - `REG & REG` and similar.
+ kSingleRegRO = 1,
+ //! Operands become write-only - `REG ^ REG` and similar.
+ kSingleRegWO = 2
+};
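+
+// Illustrative examples (standard X86 idioms, not part of the library):
+//
+//   and eax, eax ; kSingleRegRO - the register is only read, flags are set.
+//   xor eax, eax ; kSingleRegWO - the result is always zero, the input is never read.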
+
+// ============================================================================
+// [asmjit::x86::InstDB::InstSignature / OpSignature]
+// ============================================================================
+
+//! Operand signature (X86).
+//!
+//! Contains all possible operand combinations, memory size information, and
+//! a fixed register id (or `BaseReg::kIdBad` if fixed id isn't required).
+struct OpSignature {
+ //! Operand flags.
+ uint32_t opFlags;
+ //! Memory flags.
+ uint16_t memFlags;
+ //! Extra flags.
+ uint8_t extFlags;
+ //! Mask of possible register IDs.
+ uint8_t regMask;
+};
+
+ASMJIT_VARAPI const OpSignature _opSignatureTable[];
+
+//! Instruction signature (X86).
+//!
+//! Contains a sequence of operand combinations and other metadata that defines
+//! a single instruction. This data is used by the instruction validator.
+struct InstSignature {
+ //! Count of operands in `operands` (0..6).
+ uint8_t opCount : 3;
+ //! Architecture modes supported (X86 / X64).
+ uint8_t modes : 2;
+ //! Number of implicit operands.
+ uint8_t implicit : 3;
+ //! Reserved for future use.
+ uint8_t reserved;
+ //! Indexes into the `OpSignature` table.
+ uint8_t operands[Globals::kMaxOpCount];
+};
+
+ASMJIT_VARAPI const InstSignature _instSignatureTable[];
+
+// ============================================================================
+// [asmjit::x86::InstDB::CommonInfo]
+// ============================================================================
+
+//! Instruction common information (X86).
+//!
+//! Aggregated information shared across one or more instructions.
+struct CommonInfo {
+ //! Instruction flags.
+ uint32_t _flags;
+ //! First `InstSignature` entry in the database.
+ uint32_t _iSignatureIndex : 11;
+ //! Number of relevant `InstSignature` entries.
+ uint32_t _iSignatureCount : 5;
+ //! Control type, see `ControlType`.
+ uint32_t _controlType : 3;
+ //! Specifies what happens if all source operands share the same register.
+ uint32_t _singleRegCase : 2;
+ //! Reserved for future use.
+ uint32_t _reserved : 11;
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Returns instruction flags, see `InstInfo::Flags`.
+ inline uint32_t flags() const noexcept { return _flags; }
+ //! Tests whether the instruction has a `flag`, see `InstInfo::Flags`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
+
+ //! Tests whether the instruction is an FPU instruction.
+ inline bool isFpu() const noexcept { return hasFlag(kFlagFpu); }
+ //! Tests whether the instruction is an MMX/3DNOW instruction that accesses MMX registers (includes EMMS and FEMMS).
+ inline bool isMmx() const noexcept { return hasFlag(kFlagMmx); }
+ //! Tests whether the instruction is an SSE|AVX|AVX512 instruction that accesses XMM|YMM|ZMM registers.
+ inline bool isVec() const noexcept { return hasFlag(kFlagVec); }
+ //! Tests whether the instruction is an SSE+ (SSE4.2, AES, SHA included) instruction that accesses XMM registers.
+ inline bool isSse() const noexcept { return (flags() & (kFlagVec | kFlagVex | kFlagEvex)) == kFlagVec; }
+ //! Tests whether the instruction is an AVX+ (FMA included) instruction that accesses XMM|YMM|ZMM registers.
+ inline bool isAvx() const noexcept { return isVec() && isVexOrEvex(); }
+
+ //! Tests whether the instruction can be prefixed with LOCK prefix.
+ inline bool hasLockPrefix() const noexcept { return hasFlag(kFlagLock); }
+ //! Tests whether the instruction can be prefixed with REP (REPE|REPZ) prefix.
+ inline bool hasRepPrefix() const noexcept { return hasFlag(kFlagRep); }
+ //! Tests whether the instruction can be prefixed with XACQUIRE prefix.
+ inline bool hasXAcquirePrefix() const noexcept { return hasFlag(kFlagXAcquire); }
+ //! Tests whether the instruction can be prefixed with XRELEASE prefix.
+ inline bool hasXReleasePrefix() const noexcept { return hasFlag(kFlagXRelease); }
+
+ //! Tests whether the rep prefix is supported by the instruction, but ignored (has no effect).
+ inline bool isRepIgnored() const noexcept { return hasFlag(kFlagRepIgnored); }
+ //! Tests whether the instruction uses MIB.
+ inline bool isMibOp() const noexcept { return hasFlag(kFlagMib); }
+ //! Tests whether the instruction uses VSIB.
+ inline bool isVsibOp() const noexcept { return hasFlag(kFlagVsib); }
+ //! Tests whether the instruction uses VEX (can be set together with EVEX if both are encodable).
+ inline bool isVex() const noexcept { return hasFlag(kFlagVex); }
+ //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable).
+ inline bool isEvex() const noexcept { return hasFlag(kFlagEvex); }
+ //! Tests whether the instruction uses VEX or EVEX (both can be set if both prefixes are encodable).
+ inline bool isVexOrEvex() const noexcept { return hasFlag(kFlagVex | kFlagEvex); }
+
+ //! Tests whether the instruction supports AVX512 masking {k}.
+ inline bool hasAvx512K() const noexcept { return hasFlag(kFlagAvx512K); }
+ //! Tests whether the instruction supports AVX512 zeroing {k}{z}.
+ inline bool hasAvx512Z() const noexcept { return hasFlag(kFlagAvx512Z); }
+ //! Tests whether the instruction supports AVX512 embedded-rounding {er}.
+ inline bool hasAvx512ER() const noexcept { return hasFlag(kFlagAvx512ER); }
+ //! Tests whether the instruction supports AVX512 suppress-all-exceptions {sae}.
+ inline bool hasAvx512SAE() const noexcept { return hasFlag(kFlagAvx512SAE); }
+ //! Tests whether the instruction supports AVX512 broadcast (either 32-bit or 64-bit).
+ inline bool hasAvx512B() const noexcept { return hasFlag(kFlagAvx512B32 | kFlagAvx512B64); }
+ //! Tests whether the instruction supports AVX512 broadcast (32-bit).
+ inline bool hasAvx512B32() const noexcept { return hasFlag(kFlagAvx512B32); }
+ //! Tests whether the instruction supports AVX512 broadcast (64-bit).
+ inline bool hasAvx512B64() const noexcept { return hasFlag(kFlagAvx512B64); }
+
+ inline uint32_t signatureIndex() const noexcept { return _iSignatureIndex; }
+ inline uint32_t signatureCount() const noexcept { return _iSignatureCount; }
+
+ inline const InstSignature* signatureData() const noexcept { return _instSignatureTable + _iSignatureIndex; }
+ inline const InstSignature* signatureEnd() const noexcept { return _instSignatureTable + _iSignatureIndex + _iSignatureCount; }
+
+ //! Returns the control-flow type of the instruction.
+ inline uint32_t controlType() const noexcept { return _controlType; }
+
+ inline uint32_t singleRegCase() const noexcept { return _singleRegCase; }
+};
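+
+// Illustrative sketch (not part of the library): `signatureData()` and
+// `signatureEnd()` form an iterable range over the signatures of a single
+// instruction:
+//
+//   for (const InstDB::InstSignature* sig = ci.signatureData(); sig != ci.signatureEnd(); sig++) {
+//     // Match operands against `sig->operands` / `sig->opCount` (`ci` is a CommonInfo).
+//   }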
+
+ASMJIT_VARAPI const CommonInfo _commonInfoTable[];
+
+// ============================================================================
+// [asmjit::x86::InstDB::InstInfo]
+// ============================================================================
+
+//! Instruction information (X86).
+struct InstInfo {
+ //! Index to `_nameData`.
+ uint32_t _nameDataIndex : 14;
+ //! Index to `_commonInfoTable`.
+ uint32_t _commonInfoIndex : 10;
+ //! Index to `InstDB::_commonInfoTableB`.
+ uint32_t _commonInfoIndexB : 8;
+
+ //! Instruction encoding, see `InstDB::EncodingId`.
+ uint8_t _encoding;
+ //! Main opcode value (0..255).
+ uint8_t _mainOpcodeValue;
+ //! Index to `InstDB::_mainOpcodeTable` that is combined with `_mainOpcodeValue`
+ //! to form the final opcode.
+ uint8_t _mainOpcodeIndex;
+ //! Index to `InstDB::_altOpcodeTable` that contains a full alternative opcode.
+ uint8_t _altOpcodeIndex;
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Returns common information, see `CommonInfo`.
+ inline const CommonInfo& commonInfo() const noexcept { return _commonInfoTable[_commonInfoIndex]; }
+
+ //! Tests whether the instruction has flag `flag`, see `Flags`.
+ inline bool hasFlag(uint32_t flag) const noexcept { return commonInfo().hasFlag(flag); }
+ //! Returns instruction flags, see `Flags`.
+ inline uint32_t flags() const noexcept { return commonInfo().flags(); }
+
+ //! Tests whether the instruction is an FPU instruction.
+ inline bool isFpu() const noexcept { return commonInfo().isFpu(); }
+ //! Tests whether the instruction is an MMX/3DNOW instruction that accesses MMX registers (includes EMMS and FEMMS).
+ inline bool isMmx() const noexcept { return commonInfo().isMmx(); }
+ //! Tests whether the instruction is an SSE|AVX|AVX512 instruction that accesses XMM|YMM|ZMM registers.
+ inline bool isVec() const noexcept { return commonInfo().isVec(); }
+ //! Tests whether the instruction is an SSE+ (SSE4.2, AES, SHA included) instruction that accesses XMM registers.
+ inline bool isSse() const noexcept { return commonInfo().isSse(); }
+ //! Tests whether the instruction is an AVX+ (FMA included) instruction that accesses XMM|YMM|ZMM registers.
+ inline bool isAvx() const noexcept { return commonInfo().isAvx(); }
+
+ //! Tests whether the instruction can be prefixed with LOCK prefix.
+ inline bool hasLockPrefix() const noexcept { return commonInfo().hasLockPrefix(); }
+ //! Tests whether the instruction can be prefixed with REP (REPE|REPZ) prefix.
+ inline bool hasRepPrefix() const noexcept { return commonInfo().hasRepPrefix(); }
+ //! Tests whether the instruction can be prefixed with XACQUIRE prefix.
+ inline bool hasXAcquirePrefix() const noexcept { return commonInfo().hasXAcquirePrefix(); }
+ //! Tests whether the instruction can be prefixed with XRELEASE prefix.
+ inline bool hasXReleasePrefix() const noexcept { return commonInfo().hasXReleasePrefix(); }
+
+ //! Tests whether the rep prefix is supported by the instruction, but ignored (has no effect).
+ inline bool isRepIgnored() const noexcept { return commonInfo().isRepIgnored(); }
+ //! Tests whether the instruction uses MIB.
+ inline bool isMibOp() const noexcept { return hasFlag(kFlagMib); }
+ //! Tests whether the instruction uses VSIB.
+ inline bool isVsibOp() const noexcept { return hasFlag(kFlagVsib); }
+ //! Tests whether the instruction uses VEX (can be set together with EVEX if both are encodable).
+ inline bool isVex() const noexcept { return hasFlag(kFlagVex); }
+ //! Tests whether the instruction uses EVEX (can be set together with VEX if both are encodable).
+ inline bool isEvex() const noexcept { return hasFlag(kFlagEvex); }
+ //! Tests whether the instruction uses VEX or EVEX (both can be set if both prefixes are encodable).
+ inline bool isVexOrEvex() const noexcept { return hasFlag(kFlagVex | kFlagEvex); }
+
+ //! Tests whether the instruction supports AVX512 masking {k}.
+ inline bool hasAvx512K() const noexcept { return hasFlag(kFlagAvx512K); }
+ //! Tests whether the instruction supports AVX512 zeroing {k}{z}.
+ inline bool hasAvx512Z() const noexcept { return hasFlag(kFlagAvx512Z); }
+ //! Tests whether the instruction supports AVX512 embedded-rounding {er}.
+ inline bool hasAvx512ER() const noexcept { return hasFlag(kFlagAvx512ER); }
+ //! Tests whether the instruction supports AVX512 suppress-all-exceptions {sae}.
+ inline bool hasAvx512SAE() const noexcept { return hasFlag(kFlagAvx512SAE); }
+ //! Tests whether the instruction supports AVX512 broadcast (either 32-bit or 64-bit).
+ inline bool hasAvx512B() const noexcept { return hasFlag(kFlagAvx512B32 | kFlagAvx512B64); }
+ //! Tests whether the instruction supports AVX512 broadcast (32-bit).
+ inline bool hasAvx512B32() const noexcept { return hasFlag(kFlagAvx512B32); }
+ //! Tests whether the instruction supports AVX512 broadcast (64-bit).
+ inline bool hasAvx512B64() const noexcept { return hasFlag(kFlagAvx512B64); }
+
+ //! Gets the control-flow type of the instruction.
+ inline uint32_t controlType() const noexcept { return commonInfo().controlType(); }
+ inline uint32_t singleRegCase() const noexcept { return commonInfo().singleRegCase(); }
+
+ inline uint32_t signatureIndex() const noexcept { return commonInfo().signatureIndex(); }
+ inline uint32_t signatureCount() const noexcept { return commonInfo().signatureCount(); }
+
+ inline const InstSignature* signatureData() const noexcept { return commonInfo().signatureData(); }
+ inline const InstSignature* signatureEnd() const noexcept { return commonInfo().signatureEnd(); }
+};
+
+ASMJIT_VARAPI const InstInfo _instInfoTable[];
+
+inline const InstInfo& infoById(uint32_t instId) noexcept {
+ ASMJIT_ASSERT(Inst::isDefinedId(instId));
+ return _instInfoTable[instId];
+}
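+
+// Illustrative sketch (not part of the library): `infoById()` is the main
+// entry point to the database; `Inst::kIdAdd` below is assumed to be a valid
+// instruction id:
+//
+//   const InstDB::InstInfo& info = InstDB::infoById(Inst::kIdAdd);
+//   bool canLock = info.hasLockPrefix();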
+
+} // {InstDB}
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86INSTDB_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h b/3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h
new file mode 100644
index 00000000000..b8ec1dbd597
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86instdb_p.h
@@ -0,0 +1,318 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86INSTDB_P_H_INCLUDED
+#define ASMJIT_X86_X86INSTDB_P_H_INCLUDED
+
+#include "../x86/x86instdb.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_x86
+//! \{
+
+namespace InstDB {
+
+// ============================================================================
+// [asmjit::x86::InstDB::Encoding]
+// ============================================================================
+
+//! Instruction encoding (X86).
+//!
+//! This is a specific identifier that is used by AsmJit to describe the way
+//! each instruction is encoded. Some encodings are specific to a single
+//! instruction, as the X86 instruction set contains a lot of legacy encodings,
+//! and some encodings describe a group of instructions that share common
+//! traits, like MMX, SSE, AVX, and AVX512 instructions.
+enum EncodingId : uint32_t {
+ kEncodingNone = 0, //!< Never used.
+ kEncodingX86Op, //!< X86 [OP].
+ kEncodingX86Op_O, //!< X86 [OP] (opcode and /0-7).
+ kEncodingX86Op_O_I8, //!< X86 [OP] (opcode and /0-7 + 8-bit immediate).
+ kEncodingX86Op_xAddr, //!< X86 [OP] (implicit address in the first register operand).
+ kEncodingX86Op_xAX, //!< X86 [OP] (implicit or explicit '?AX' form).
+ kEncodingX86Op_xDX_xAX, //!< X86 [OP] (implicit or explicit '?DX, ?AX' form).
+ kEncodingX86Op_MemZAX, //!< X86 [OP] (implicit or explicit '[EAX|RAX]' form).
+ kEncodingX86I_xAX, //!< X86 [I] (implicit or explicit '?AX' form).
+ kEncodingX86M, //!< X86 [M] (handles 2|4|8-bytes size).
+ kEncodingX86M_NoSize, //!< X86 [M] (doesn't handle any size).
+ kEncodingX86M_GPB, //!< X86 [M] (handles single-byte size).
+ kEncodingX86M_GPB_MulDiv, //!< X86 [M] (like GPB, handles implicit|explicit MUL|DIV|IDIV).
+ kEncodingX86M_Only, //!< X86 [M] (restricted to memory operand of any size).
+ kEncodingX86M_Nop, //!< X86 [M] (special case of NOP instruction).
+ kEncodingX86R_Native, //!< X86 [R] (register must be either 32-bit or 64-bit depending on arch).
+ kEncodingX86Rm, //!< X86 [RM] (doesn't handle single-byte size).
+ kEncodingX86Rm_Raw66H, //!< X86 [RM] (used by LZCNT, POPCNT, and TZCNT).
+ kEncodingX86Rm_NoSize, //!< X86 [RM] (doesn't add REX.W prefix if 64-bit reg is used).
+ kEncodingX86Mr, //!< X86 [MR] (doesn't handle single-byte size).
+ kEncodingX86Mr_NoSize, //!< X86 [MR] (doesn't handle any size).
+ kEncodingX86Arith, //!< X86 adc, add, and, cmp, or, sbb, sub, xor.
+ kEncodingX86Bswap, //!< X86 bswap.
+ kEncodingX86Bt, //!< X86 bt, btc, btr, bts.
+ kEncodingX86Call, //!< X86 call.
+ kEncodingX86Cmpxchg, //!< X86 [MR] cmpxchg.
+ kEncodingX86Cmpxchg8b_16b, //!< X86 [MR] cmpxchg8b, cmpxchg16b.
+ kEncodingX86Crc, //!< X86 crc32.
+ kEncodingX86Enter, //!< X86 enter.
+ kEncodingX86Imul, //!< X86 imul.
+ kEncodingX86In, //!< X86 in.
+ kEncodingX86Ins, //!< X86 ins[b|w|d].
+ kEncodingX86IncDec, //!< X86 inc, dec.
+ kEncodingX86Int, //!< X86 int (interrupt).
+ kEncodingX86Jcc, //!< X86 jcc.
+ kEncodingX86JecxzLoop, //!< X86 jcxz, jecxz, jrcxz, loop, loope, loopne.
+ kEncodingX86Jmp, //!< X86 jmp.
+ kEncodingX86JmpRel, //!< X86 xbegin.
+ kEncodingX86Lea, //!< X86 lea.
+ kEncodingX86Mov, //!< X86 mov (all possible cases).
+ kEncodingX86MovsxMovzx, //!< X86 movsx, movzx.
+ kEncodingX86MovntiMovdiri, //!< X86 movnti/movdiri.
+ kEncodingX86EnqcmdMovdir64b, //!< X86 enqcmd/enqcmds/movdir64b.
+ kEncodingX86Out, //!< X86 out.
+ kEncodingX86Outs, //!< X86 outs[b|w|d].
+ kEncodingX86Push, //!< X86 push.
+ kEncodingX86Pop, //!< X86 pop.
+ kEncodingX86Ret, //!< X86 ret.
+ kEncodingX86Rot, //!< X86 rcl, rcr, rol, ror, sal, sar, shl, shr.
+ kEncodingX86Set, //!< X86 setcc.
+ kEncodingX86ShldShrd, //!< X86 shld, shrd.
+ kEncodingX86StrRm, //!< X86 lods.
+ kEncodingX86StrMr, //!< X86 scas, stos.
+ kEncodingX86StrMm, //!< X86 cmps, movs.
+ kEncodingX86Test, //!< X86 test.
+ kEncodingX86Xadd, //!< X86 xadd.
+ kEncodingX86Xchg, //!< X86 xchg.
+ kEncodingX86Fence, //!< X86 lfence, mfence, sfence.
+ kEncodingX86Bndmov, //!< X86 [RM|MR] (used by BNDMOV).
+ kEncodingFpuOp, //!< FPU [OP].
+ kEncodingFpuArith, //!< FPU fadd, fdiv, fdivr, fmul, fsub, fsubr.
+ kEncodingFpuCom, //!< FPU fcom, fcomp.
+ kEncodingFpuFldFst, //!< FPU fld, fst, fstp.
+ kEncodingFpuM, //!< FPU fiadd, ficom, ficomp, fidiv, fidivr, fild, fimul, fist, fistp, fisttp, fisub, fisubr.
+ kEncodingFpuR, //!< FPU fcmov, fcomi, fcomip, ffree, fucom, fucomi, fucomip, fucomp, fxch.
+ kEncodingFpuRDef, //!< FPU faddp, fdivp, fdivrp, fmulp, fsubp, fsubrp.
+ kEncodingFpuStsw, //!< FPU fnstsw, fstsw.
+ kEncodingExtRm, //!< EXT [RM].
+ kEncodingExtRm_XMM0, //!< EXT [RM<XMM0>].
+ kEncodingExtRm_ZDI, //!< EXT [RM<ZDI>].
+ kEncodingExtRm_P, //!< EXT [RM] (propagates 66H if the instruction uses XMM register).
+ kEncodingExtRm_Wx, //!< EXT [RM] (propagates REX.W if GPQ is used).
+ kEncodingExtRmRi, //!< EXT [RM|RI].
+ kEncodingExtRmRi_P, //!< EXT [RM|RI] (propagates 66H if the instruction uses XMM register).
+ kEncodingExtRmi, //!< EXT [RMI].
+ kEncodingExtRmi_P, //!< EXT [RMI] (propagates 66H if the instruction uses XMM register).
+ kEncodingExtPextrw, //!< EXT pextrw.
+ kEncodingExtExtract, //!< EXT pextrb, pextrd, pextrq, extractps.
+ kEncodingExtMov, //!< EXT mov?? - #1:[MM|XMM, MM|XMM|Mem] #2:[MM|XMM|Mem, MM|XMM].
+ kEncodingExtMovbe, //!< EXT movbe.
+ kEncodingExtMovd, //!< EXT movd.
+ kEncodingExtMovq, //!< EXT movq.
+ kEncodingExtExtrq, //!< EXT extrq (SSE4A).
+ kEncodingExtInsertq, //!< EXT insertq (SSE4A).
+ kEncodingExt3dNow, //!< EXT [RMI] (3DNOW specific).
+ kEncodingVexOp, //!< VEX [OP].
+ kEncodingVexKmov, //!< VEX [RM|MR] (used by kmov[b|w|d|q]).
+ kEncodingVexR_Wx, //!< VEX|EVEX [R] (propagates VEX.W if GPQ used).
+ kEncodingVexM, //!< VEX|EVEX [M].
+ kEncodingVexM_VM, //!< VEX|EVEX [M] (propagates VEX|EVEX.L, VSIB support).
+ kEncodingVexMr_Lx, //!< VEX|EVEX [MR] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexMr_VM, //!< VEX|EVEX [MR] (propagates VEX|EVEX.L, VSIB support).
+ kEncodingVexMri, //!< VEX|EVEX [MRI].
+ kEncodingVexMri_Lx, //!< VEX|EVEX [MRI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRm, //!< VEX|EVEX [RM].
+ kEncodingVexRm_ZDI, //!< VEX|EVEX [RM<ZDI>].
+ kEncodingVexRm_Wx, //!< VEX|EVEX [RM] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexRm_Lx, //!< VEX|EVEX [RM] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRm_Lx_Bcst, //!< VEX|EVEX [RM] (can handle broadcast r32/r64).
+ kEncodingVexRm_VM, //!< VEX|EVEX [RM] (propagates VEX|EVEX.L, VSIB support).
+ kEncodingVexRm_T1_4X, //!< EVEX [RM] (used by NN instructions that use RM-T1_4X encoding).
+ kEncodingVexRmi, //!< VEX|EVEX [RMI].
+ kEncodingVexRmi_Wx, //!< VEX|EVEX [RMI] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexRmi_Lx, //!< VEX|EVEX [RMI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvm, //!< VEX|EVEX [RVM].
+ kEncodingVexRvm_Wx, //!< VEX|EVEX [RVM] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexRvm_ZDX_Wx, //!< VEX|EVEX [RVM<ZDX>] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexRvm_Lx, //!< VEX|EVEX [RVM] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvmr, //!< VEX|EVEX [RVMR].
+ kEncodingVexRvmr_Lx, //!< VEX|EVEX [RVMR] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvmi, //!< VEX|EVEX [RVMI].
+ kEncodingVexRvmi_Lx, //!< VEX|EVEX [RVMI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRmv, //!< VEX|EVEX [RMV].
+ kEncodingVexRmv_Wx, //!< VEX|EVEX [RMV] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexRmv_VM, //!< VEX|EVEX [RMV] (propagates VEX|EVEX.L, VSIB support).
+ kEncodingVexRmvRm_VM, //!< VEX|EVEX [RMV|RM] (propagates VEX|EVEX.L, VSIB support).
+ kEncodingVexRmvi, //!< VEX|EVEX [RMVI].
+ kEncodingVexRmMr, //!< VEX|EVEX [RM|MR].
+ kEncodingVexRmMr_Lx, //!< VEX|EVEX [RM|MR] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvmRmv, //!< VEX|EVEX [RVM|RMV].
+ kEncodingVexRvmRmi, //!< VEX|EVEX [RVM|RMI].
+ kEncodingVexRvmRmi_Lx, //!< VEX|EVEX [RVM|RMI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvmRmvRmi, //!< VEX|EVEX [RVM|RMV|RMI].
+ kEncodingVexRvmMr, //!< VEX|EVEX [RVM|MR].
+ kEncodingVexRvmMvr, //!< VEX|EVEX [RVM|MVR].
+ kEncodingVexRvmMvr_Lx, //!< VEX|EVEX [RVM|MVR] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvmVmi, //!< VEX|EVEX [RVM|VMI].
+ kEncodingVexRvmVmi_Lx, //!< VEX|EVEX [RVM|VMI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexVm, //!< VEX|EVEX [VM].
+ kEncodingVexVm_Wx, //!< VEX|EVEX [VM] (propagates VEX|EVEX.W if GPQ used).
+ kEncodingVexVmi, //!< VEX|EVEX [VMI].
+ kEncodingVexVmi_Lx, //!< VEX|EVEX [VMI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexVmi4_Wx, //!< VEX|EVEX [VMI] (propagates VEX|EVEX.W if GPQ used, DWORD Immediate).
+ kEncodingVexEvexVmi_Lx, //!< VEX|EVEX [VMI] (special, used by vpsrldq and vpslldq).
+ kEncodingVexRvrmRvmr, //!< VEX|EVEX [RVRM|RVMR].
+ kEncodingVexRvrmRvmr_Lx, //!< VEX|EVEX [RVRM|RVMR] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexRvrmiRvmri_Lx, //!< VEX|EVEX [RVRMI|RVMRI] (propagates VEX|EVEX.L if YMM used).
+ kEncodingVexMovdMovq, //!< VEX|EVEX vmovd, vmovq.
+ kEncodingVexMovssMovsd, //!< VEX|EVEX vmovss, vmovsd.
+ kEncodingFma4, //!< FMA4 [R, R, R/M, R/M].
+ kEncodingFma4_Lx, //!< FMA4 [R, R, R/M, R/M] (propagates AVX.L if YMM used).
+ kEncodingCount //!< Count of instruction encodings.
+};
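+
+// How EncodingId is consumed is up to the assembler's dispatcher - roughly
+// like the following sketch (not the actual dispatcher, which handles many
+// more details per case):
+//
+//   switch (encoding) {
+//     case kEncodingX86Op   : /* encode [OP] - no ModRM operands   */ break;
+//     case kEncodingX86Arith: /* encode the adc/add/and/... family */ break;
+//     // ... one case per EncodingId ...
+//   }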
+
+// ============================================================================
+// [asmjit::x86::InstDB - CommonInfoTableB]
+// ============================================================================
+
+//! CPU extensions required to execute the instruction.
+struct CommonInfoTableB {
+ //! Features vector.
+ uint8_t _features[6];
+ //! Index to `_rwFlagsTable`.
+ uint8_t _rwFlagsIndex;
+ //! Reserved for future use.
+ uint8_t _reserved;
+
+ inline const uint8_t* featuresBegin() const noexcept { return _features; }
+ inline const uint8_t* featuresEnd() const noexcept { return _features + ASMJIT_ARRAY_SIZE(_features); }
+};
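+
+// A minimal sketch of how the feature bytes are meant to be consumed - the
+// vector has a fixed capacity and unused trailing slots are assumed to be
+// zero:
+//
+//   for (const uint8_t* p = tableB.featuresBegin(); p != tableB.featuresEnd() && *p; p++)
+//     handleRequiredFeature(*p); // `handleRequiredFeature` is hypothetical.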
+
+// ============================================================================
+// [asmjit::x86::InstDB - InstNameIndex]
+// ============================================================================
+
+// ${NameLimits:Begin}
+// ------------------- Automatically generated, do not edit -------------------
+enum : uint32_t { kMaxNameSize = 17 };
+// ----------------------------------------------------------------------------
+// ${NameLimits:End}
+
+struct InstNameIndex {
+ uint16_t start;
+ uint16_t end;
+};
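+
+// `instNameIndex` (declared below) has 26 entries - one per letter 'a'..'z'.
+// Each entry is assumed to store the start..end range of instruction IDs
+// whose names begin with that letter, narrowing the search performed by
+// name-to-ID lookups.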
+
+// ============================================================================
+// [asmjit::x86::InstDB - RWInfo]
+// ============================================================================
+
+struct RWInfo {
+ enum Category : uint8_t {
+ kCategoryGeneric,
+ kCategoryMov,
+ kCategoryImul,
+ kCategoryMovh64,
+ kCategoryVmaskmov,
+ kCategoryVmovddup,
+ kCategoryVmovmskpd,
+ kCategoryVmovmskps,
+ kCategoryVmov1_2,
+ kCategoryVmov1_4,
+ kCategoryVmov1_8,
+ kCategoryVmov2_1,
+ kCategoryVmov4_1,
+ kCategoryVmov8_1
+ };
+
+ uint8_t category;
+ uint8_t rmInfo;
+ uint8_t opInfoIndex[6];
+};
+
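+//! Read/write information of a single operand.
+//!
+//! `rByteMask` and `wByteMask` are byte-granular masks - bit N set means that
+//! byte N of the operand is read or written, respectively - which makes it
+//! possible to describe partial accesses of up to 64-byte (ZMM) operands.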
+struct RWInfoOp {
+ uint64_t rByteMask;
+ uint64_t wByteMask;
+ uint8_t physId;
+ uint8_t reserved[3];
+ uint32_t flags;
+};
+
+//! R/M information.
+//!
+//! This data is used to reliably replace a register operand with a memory
+//! operand.
+struct RWInfoRm {
+ enum Category : uint8_t {
+ kCategoryNone = 0,
+ kCategoryFixed,
+ kCategoryConsistent,
+ kCategoryHalf,
+ kCategoryQuarter,
+ kCategoryEighth
+ };
+
+ enum Flags : uint8_t {
+ kFlagAmbiguous = 0x01
+ };
+
+ uint8_t category;
+ uint8_t rmOpsMask;
+ uint8_t fixedSize;
+ uint8_t flags;
+ uint8_t rmFeature;
+};
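+
+// Note on the categories above: kCategoryFixed means the memory operand
+// always has `fixedSize` bytes, kCategoryConsistent means it matches the
+// size of the register operand, and kCategoryHalf/Quarter/Eighth cover
+// instructions whose memory operand is that fraction of the register
+// operand's width (typical for narrowing or widening forms).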
+
+struct RWFlagsInfoTable {
+ //! CPU/FPU flags read.
+ uint32_t readFlags;
+ //! CPU/FPU flags written or undefined.
+ uint32_t writeFlags;
+};
+
+extern const uint8_t rwInfoIndex[Inst::_kIdCount * 2];
+extern const RWInfo rwInfo[];
+extern const RWInfoOp rwInfoOp[];
+extern const RWInfoRm rwInfoRm[];
+extern const RWFlagsInfoTable _rwFlagsInfoTable[];
+
+// ============================================================================
+// [asmjit::x86::InstDB::Tables]
+// ============================================================================
+
+extern const uint32_t _mainOpcodeTable[];
+extern const uint32_t _altOpcodeTable[];
+
+#ifndef ASMJIT_NO_TEXT
+extern const char _nameData[];
+extern const InstNameIndex instNameIndex[26];
+#endif // !ASMJIT_NO_TEXT
+
+extern const CommonInfoTableB _commonInfoTableB[];
+
+} // {InstDB}
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86INSTDB_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86internal.cpp b/3rdparty/asmjit/src/asmjit/x86/x86internal.cpp
new file mode 100644
index 00000000000..a35b0d34e33
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86internal.cpp
@@ -0,0 +1,1633 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/logging.h"
+#include "../core/string.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../x86/x86internal_p.h"
+
+// Can be used for debugging...
+// #define ASMJIT_DUMP_ARGS_ASSIGNMENT
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::X86Internal - Helpers]
+// ============================================================================
+
+static ASMJIT_INLINE uint32_t x86GetXmmMovInst(const FuncFrame& frame) {
+ bool avx = frame.isAvxEnabled();
+ bool aligned = frame.hasAlignedVecSR();
+
+ return aligned ? (avx ? Inst::kIdVmovaps : Inst::kIdMovaps)
+ : (avx ? Inst::kIdVmovups : Inst::kIdMovups);
+}
+
+static ASMJIT_INLINE uint32_t x86VecTypeIdToRegType(uint32_t typeId) noexcept {
+ return typeId <= Type::_kIdVec128End ? Reg::kTypeXmm :
+ typeId <= Type::_kIdVec256End ? Reg::kTypeYmm : Reg::kTypeZmm;
+}
+
+//! Converts `size` to a 'kmov?' instruction.
+static inline uint32_t x86KmovFromSize(uint32_t size) noexcept {
+ switch (size) {
+ case 1: return Inst::kIdKmovb;
+ case 2: return Inst::kIdKmovw;
+ case 4: return Inst::kIdKmovd;
+ case 8: return Inst::kIdKmovq;
+ default: return Inst::kIdNone;
+ }
+}
+
+// ============================================================================
+// [asmjit::X86Internal - FuncDetail]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error X86Internal::initFuncDetail(FuncDetail& func, const FuncSignature& sign, uint32_t gpSize) noexcept {
+ DebugUtils::unused(sign);
+
+ const CallConv& cc = func.callConv();
+ uint32_t archId = cc.archId();
+ uint32_t stackOffset = cc._spillZoneSize;
+
+ uint32_t i;
+ uint32_t argCount = func.argCount();
+
+ if (func.retCount() != 0) {
+ uint32_t typeId = func._rets[0].typeId();
+ switch (typeId) {
+ case Type::kIdI64:
+ case Type::kIdU64: {
+ if (archId == ArchInfo::kIdX86) {
+ // Convert a 64-bit return value to two 32-bit return values.
+ func._retCount = 2;
+ typeId -= 2; // Maps Type::kIdI64 to Type::kIdI32 (and kIdU64 to kIdU32).
+
+ // 64-bit value is returned in EDX:EAX on X86.
+ func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, typeId);
+ func._rets[1].initReg(Reg::kTypeGpd, Gp::kIdDx, typeId);
+ break;
+ }
+ else {
+ func._rets[0].initReg(Reg::kTypeGpq, Gp::kIdAx, typeId);
+ }
+ break;
+ }
+
+ case Type::kIdI8:
+ case Type::kIdI16:
+ case Type::kIdI32: {
+ func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, Type::kIdI32);
+ break;
+ }
+
+ case Type::kIdU8:
+ case Type::kIdU16:
+ case Type::kIdU32: {
+ func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, Type::kIdU32);
+ break;
+ }
+
+ case Type::kIdF32:
+ case Type::kIdF64: {
+ uint32_t regType = (archId == ArchInfo::kIdX86) ? Reg::kTypeSt : Reg::kTypeXmm;
+ func._rets[0].initReg(regType, 0, typeId);
+ break;
+ }
+
+ case Type::kIdF80: {
+ // 80-bit floats are always returned in FP0.
+ func._rets[0].initReg(Reg::kTypeSt, 0, typeId);
+ break;
+ }
+
+ case Type::kIdMmx32:
+ case Type::kIdMmx64: {
+ // MM registers are returned through XMM or GPQ (Win64).
+ uint32_t regType = Reg::kTypeMm;
+ if (archId != ArchInfo::kIdX86)
+ regType = cc.strategy() == CallConv::kStrategyDefault ? Reg::kTypeXmm : Reg::kTypeGpq;
+
+ func._rets[0].initReg(regType, 0, typeId);
+ break;
+ }
+
+ default: {
+ func._rets[0].initReg(x86VecTypeIdToRegType(typeId), 0, typeId);
+ break;
+ }
+ }
+ }
+
+ if (cc.strategy() == CallConv::kStrategyDefault) {
+ uint32_t gpzPos = 0;
+ uint32_t vecPos = 0;
+
+ for (i = 0; i < argCount; i++) {
+ FuncValue& arg = func._args[i];
+ uint32_t typeId = arg.typeId();
+
+ if (Type::isInt(typeId)) {
+ uint32_t regId = gpzPos < CallConv::kMaxRegArgsPerGroup ? cc._passedOrder[Reg::kGroupGp].id[gpzPos] : uint8_t(BaseReg::kIdBad);
+ if (regId != BaseReg::kIdBad) {
+ uint32_t regType = (typeId <= Type::kIdU32) ? Reg::kTypeGpd : Reg::kTypeGpq;
+ arg.assignRegData(regType, regId);
+ func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
+ gpzPos++;
+ }
+ else {
+ uint32_t size = Support::max<uint32_t>(Type::sizeOf(typeId), gpSize);
+ arg.assignStackOffset(int32_t(stackOffset));
+ stackOffset += size;
+ }
+ continue;
+ }
+
+ if (Type::isFloat(typeId) || Type::isVec(typeId)) {
+ uint32_t regId = vecPos < CallConv::kMaxRegArgsPerGroup ? cc._passedOrder[Reg::kGroupVec].id[vecPos] : uint8_t(BaseReg::kIdBad);
+
+ // If this is a float, but the calling convention doesn't pass floats by
+ // vector registers (`kFlagPassFloatsByVec`), we have to pass it by stack.
+ regId = BaseReg::kIdBad;
+
+ if (regId != BaseReg::kIdBad) {
+ arg.initTypeId(typeId);
+ arg.assignRegData(x86VecTypeIdToRegType(typeId), regId);
+ func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
+ vecPos++;
+ }
+ else {
+ uint32_t size = Type::sizeOf(typeId);
+ arg.assignStackOffset(int32_t(stackOffset));
+ stackOffset += size;
+ }
+ continue;
+ }
+ }
+ }
+
+ if (cc.strategy() == CallConv::kStrategyWin64) {
+ for (i = 0; i < argCount; i++) {
+ FuncValue& arg = func._args[i];
+
+ uint32_t typeId = arg.typeId();
+ uint32_t size = Type::sizeOf(typeId);
+
+ if (Type::isInt(typeId) || Type::isMmx(typeId)) {
+ uint32_t regId = i < CallConv::kMaxRegArgsPerGroup ? cc._passedOrder[Reg::kGroupGp].id[i] : uint8_t(BaseReg::kIdBad);
+ if (regId != BaseReg::kIdBad) {
+ uint32_t regType = (size <= 4 && !Type::isMmx(typeId)) ? Reg::kTypeGpd : Reg::kTypeGpq;
+ arg.assignRegData(regType, regId);
+ func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
+ }
+ else {
+ arg.assignStackOffset(int32_t(stackOffset));
+ stackOffset += gpSize;
+ }
+ continue;
+ }
+
+ if (Type::isFloat(typeId) || Type::isVec(typeId)) {
+ uint32_t regId = BaseReg::kIdBad;
+ if (i < CallConv::kMaxRegArgsPerGroup)
+ regId = cc._passedOrder[Reg::kGroupVec].id[i];
+
+ if (regId != BaseReg::kIdBad && (Type::isFloat(typeId) || cc.hasFlag(CallConv::kFlagVectorCall))) {
+ uint32_t regType = x86VecTypeIdToRegType(typeId);
+ arg.assignRegData(regType, regId);
+ func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
+ }
+ else {
+ arg.assignStackOffset(int32_t(stackOffset));
+ stackOffset += 8; // Always 8 bytes (float/double).
+ }
+ continue;
+ }
+ }
+ }
+
+ func._argStackSize = stackOffset;
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::X86FuncArgsContext]
+// ============================================================================
+
+static RegInfo x86GetRegForMemToMemMove(uint32_t archId, uint32_t dstTypeId, uint32_t srcTypeId) noexcept {
+ uint32_t dstSize = Type::sizeOf(dstTypeId);
+ uint32_t srcSize = Type::sizeOf(srcTypeId);
+ uint32_t maxSize = Support::max<uint32_t>(dstSize, srcSize);
+ uint32_t gpSize = archId == ArchInfo::kIdX86 ? 4 : 8;
+
+ uint32_t signature = 0;
+ if (maxSize <= gpSize || (Type::isInt(dstTypeId) && Type::isInt(srcTypeId)))
+ signature = maxSize <= 4 ? Gpd::kSignature : Gpq::kSignature;
+ else if (maxSize <= 16)
+ signature = Xmm::kSignature;
+ else if (maxSize <= 32)
+ signature = Ymm::kSignature;
+ else if (maxSize <= 64)
+ signature = Zmm::kSignature;
+
+ return RegInfo { signature };
+}
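+
+// For example, on X64 an 8-byte int<->int move resolves to Gpq, a 16-byte
+// move to Xmm, and a 32-byte move to Ymm; anything larger than 64 bytes
+// yields an empty signature that callers must treat as an error.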
+
+// Used by both `argsToFuncFrame()` and `emitArgsAssignment()`.
+class X86FuncArgsContext {
+public:
+ enum VarId : uint32_t {
+ kVarIdNone = 0xFF
+ };
+
+ //! Contains information about a single argument or SA register that may need shuffling.
+ struct Var {
+ inline void init(const FuncValue& cur_, const FuncValue& out_) noexcept {
+ cur = cur_;
+ out = out_;
+ }
+
+ //! Reset the value to its unassigned state.
+ inline void reset() noexcept {
+ cur.reset();
+ out.reset();
+ }
+
+ inline bool isDone() const noexcept { return cur.isDone(); }
+ inline void markDone() noexcept { cur.addFlags(FuncValue::kFlagIsDone); }
+
+ FuncValue cur;
+ FuncValue out;
+ };
+
+ struct WorkData {
+ inline void reset() noexcept {
+ _archRegs = 0;
+ _workRegs = 0;
+ _usedRegs = 0;
+ _assignedRegs = 0;
+ _dstRegs = 0;
+ _dstShuf = 0;
+ _numSwaps = 0;
+ _numStackArgs = 0;
+ memset(_reserved, 0, sizeof(_reserved));
+ memset(_physToVarId, kVarIdNone, sizeof(_physToVarId));
+ }
+
+ inline bool isAssigned(uint32_t regId) const noexcept {
+ ASMJIT_ASSERT(regId < 32);
+ return Support::bitTest(_assignedRegs, regId);
+ }
+
+ inline void assign(uint32_t varId, uint32_t regId) noexcept {
+ ASMJIT_ASSERT(!isAssigned(regId));
+ ASMJIT_ASSERT(_physToVarId[regId] == kVarIdNone);
+
+ _physToVarId[regId] = uint8_t(varId);
+ _assignedRegs ^= Support::bitMask(regId); // XOR sets the bit; the asserts above guarantee it was clear.
+ }
+
+ inline void reassign(uint32_t varId, uint32_t newId, uint32_t oldId) noexcept {
+ ASMJIT_ASSERT( isAssigned(oldId));
+ ASMJIT_ASSERT(!isAssigned(newId));
+ ASMJIT_ASSERT(_physToVarId[oldId] == varId);
+ ASMJIT_ASSERT(_physToVarId[newId] == kVarIdNone);
+
+ _physToVarId[oldId] = uint8_t(kVarIdNone);
+ _physToVarId[newId] = uint8_t(varId);
+ _assignedRegs ^= Support::bitMask(newId) ^ Support::bitMask(oldId);
+ }
+
+ inline void swap(uint32_t aVarId, uint32_t aRegId, uint32_t bVarId, uint32_t bRegId) noexcept {
+ ASMJIT_ASSERT(isAssigned(aRegId));
+ ASMJIT_ASSERT(isAssigned(bRegId));
+ ASMJIT_ASSERT(_physToVarId[aRegId] == aVarId);
+ ASMJIT_ASSERT(_physToVarId[bRegId] == bVarId);
+
+ _physToVarId[aRegId] = uint8_t(bVarId);
+ _physToVarId[bRegId] = uint8_t(aVarId);
+ }
+
+ inline void unassign(uint32_t varId, uint32_t regId) noexcept {
+ ASMJIT_ASSERT(isAssigned(regId));
+ ASMJIT_ASSERT(_physToVarId[regId] == varId);
+
+ DebugUtils::unused(varId);
+ _physToVarId[regId] = uint8_t(kVarIdNone);
+ _assignedRegs ^= Support::bitMask(regId);
+ }
+
+ inline uint32_t archRegs() const noexcept { return _archRegs; }
+ inline uint32_t workRegs() const noexcept { return _workRegs; }
+ inline uint32_t usedRegs() const noexcept { return _usedRegs; }
+ inline uint32_t assignedRegs() const noexcept { return _assignedRegs; }
+ inline uint32_t dstRegs() const noexcept { return _dstRegs; }
+ inline uint32_t availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
+
+ uint32_t _archRegs; //!< All allocable registers provided by the architecture.
+ uint32_t _workRegs; //!< All registers that can be used by the shuffler.
+ uint32_t _usedRegs; //!< Registers used by the shuffler (all).
+ uint32_t _assignedRegs; //!< Assigned registers.
+ uint32_t _dstRegs; //!< Destination registers assigned to arguments or SA.
+ uint32_t _dstShuf; //!< Destination registers that require shuffling.
+ uint8_t _numSwaps; //!< Number of register swaps.
+ uint8_t _numStackArgs; //!< Number of stack loads.
+ uint8_t _reserved[6]; //!< Reserved (only used as padding).
+ uint8_t _physToVarId[32]; //!< Physical ID to variable ID mapping.
+ };
+
+ uint8_t _archId;
+ bool _hasStackSrc; //!< Has arguments passed via stack (SRC).
+ bool _hasPreservedFP; //!< Has preserved frame-pointer (FP).
+ uint8_t _stackDstMask; //!< Has arguments assigned to stack (DST).
+ uint8_t _regSwapsMask; //!< Register swap groups (bit-mask).
+ uint8_t _saVarId;
+ uint32_t _varCount;
+ WorkData _workData[BaseReg::kGroupVirt];
+ Var _vars[kFuncArgCountLoHi + 1];
+
+ X86FuncArgsContext() noexcept;
+
+ inline uint32_t archId() const noexcept { return _archId; }
+ inline uint32_t varCount() const noexcept { return _varCount; }
+
+ inline Var& var(uint32_t varId) noexcept { return _vars[varId]; }
+ inline const Var& var(uint32_t varId) const noexcept { return _vars[varId]; }
+ inline uint32_t indexOf(const Var* var) const noexcept { return uint32_t((size_t)(var - _vars)); }
+
+ Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args) noexcept;
+ Error markScratchRegs(FuncFrame& frame) noexcept;
+ Error markDstRegsDirty(FuncFrame& frame) noexcept;
+ Error markStackArgsReg(FuncFrame& frame) noexcept;
+};
+
+X86FuncArgsContext::X86FuncArgsContext() noexcept {
+ _archId = ArchInfo::kIdNone;
+ _varCount = 0;
+ _hasStackSrc = false;
+ _hasPreservedFP = false;
+ _stackDstMask = 0;
+ _regSwapsMask = 0;
+ _saVarId = kVarIdNone;
+
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
+ _workData[group].reset();
+}
+
+ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args) noexcept {
+ // The code has to be updated if this changes.
+ ASMJIT_ASSERT(BaseReg::kGroupVirt == 4);
+
+ uint32_t i;
+ const FuncDetail& func = *args.funcDetail();
+
+ // Initialize ArchType.
+ uint32_t archId = func.callConv().archId();
+ uint32_t archRegCount = (archId == ArchInfo::kIdX86) ? 8 : 16;
+
+ _archId = uint8_t(archId);
+
+ // Initialize `_archRegs`.
+ _workData[Reg::kGroupGp ]._archRegs = Support::lsbMask<uint32_t>(archRegCount) & ~Support::bitMask(Gp::kIdSp);
+ _workData[Reg::kGroupVec ]._archRegs = Support::lsbMask<uint32_t>(archRegCount);
+ _workData[Reg::kGroupMm ]._archRegs = Support::lsbMask<uint32_t>(8);
+ _workData[Reg::kGroupKReg]._archRegs = Support::lsbMask<uint32_t>(8);
+
+ if (frame.hasPreservedFP())
+ _workData[Reg::kGroupGp]._archRegs &= ~Support::bitMask(Gp::kIdBp);
+
+ // Extract information from all function arguments/assignments and build Var[] array.
+ uint32_t varId = 0;
+ for (i = 0; i < kFuncArgCountLoHi; i++) {
+ const FuncValue& dst_ = args.arg(i);
+ if (!dst_.isAssigned()) continue;
+
+ const FuncValue& src_ = func.arg(i);
+ if (ASMJIT_UNLIKELY(!src_.isAssigned()))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ Var& var = _vars[varId];
+ var.init(src_, dst_);
+
+ FuncValue& src = var.cur;
+ FuncValue& dst = var.out;
+
+ uint32_t dstGroup = 0xFFFFFFFFu;
+ uint32_t dstId = BaseReg::kIdBad;
+ WorkData* dstWd = nullptr;
+
+ if (dst.isReg()) {
+ uint32_t dstType = dst.regType();
+ if (ASMJIT_UNLIKELY(dstType >= Reg::kTypeCount))
+ return DebugUtils::errored(kErrorInvalidRegType);
+
+ // Copy TypeId from source if the destination doesn't have it. The RA
+ // used by BaseCompiler would never leave TypeId undefined, but users
+ // of FuncAPI can just assign phys regs without specifying the type.
+ if (!dst.hasTypeId())
+ dst.setTypeId(Reg::typeIdOf(dst.regType()));
+
+ dstGroup = Reg::groupOf(dstType);
+ if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
+ return DebugUtils::errored(kErrorInvalidRegGroup);
+
+ dstWd = &_workData[dstGroup];
+ dstId = dst.regId();
+ if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
+ return DebugUtils::errored(kErrorInvalidPhysId);
+
+ if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
+ return DebugUtils::errored(kErrorOverlappedRegs);
+
+ dstWd->_dstRegs |= Support::bitMask(dstId);
+ dstWd->_dstShuf |= Support::bitMask(dstId);
+ dstWd->_usedRegs |= Support::bitMask(dstId);
+ }
+ else {
+ if (!dst.hasTypeId())
+ dst.setTypeId(src.typeId());
+
+ RegInfo regInfo = x86GetRegForMemToMemMove(archId, dst.typeId(), src.typeId());
+ if (ASMJIT_UNLIKELY(!regInfo.isValid()))
+ return DebugUtils::errored(kErrorInvalidState);
+ _stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
+ }
+
+ if (src.isReg()) {
+ uint32_t srcId = src.regId();
+ uint32_t srcGroup = Reg::groupOf(src.regType());
+
+ if (dstGroup == srcGroup) {
+ dstWd->assign(varId, srcId);
+
+ // The best case, register is allocated where it is expected to be.
+ if (dstId == srcId)
+ var.markDone();
+ }
+ else {
+ if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ WorkData& srcData = _workData[srcGroup];
+ srcData.assign(varId, srcId);
+ }
+ }
+ else {
+ if (dstWd)
+ dstWd->_numStackArgs++;
+ _hasStackSrc = true;
+ }
+
+ varId++;
+ }
+
+ // Initialize WorkData::workRegs.
+ for (i = 0; i < BaseReg::kGroupVirt; i++)
+ _workData[i]._workRegs = (_workData[i].archRegs() & (frame.dirtyRegs(i) | ~frame.preservedRegs(i))) | _workData[i].dstRegs() | _workData[i].assignedRegs();
+
+ // Create a variable that represents `SARegId` if necessary.
+ bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
+
+ WorkData& gpRegs = _workData[BaseReg::kGroupGp];
+ uint32_t saCurRegId = frame.saRegId();
+ uint32_t saOutRegId = args.saRegId();
+
+ if (saCurRegId != BaseReg::kIdBad) {
+ // Check if the provided `SARegId` doesn't collide with input registers.
+ if (ASMJIT_UNLIKELY(gpRegs.isAssigned(saCurRegId)))
+ return DebugUtils::errored(kErrorOverlappedRegs);
+ }
+
+ if (saOutRegId != BaseReg::kIdBad) {
+ // Check if the provided `SARegId` doesn't collide with argument assignments.
+ if (ASMJIT_UNLIKELY(Support::bitTest(gpRegs.dstRegs(), saOutRegId)))
+ return DebugUtils::errored(kErrorOverlappedRegs);
+ saRegRequired = true;
+ }
+
+ if (saRegRequired) {
+ uint32_t ptrTypeId = (archId == ArchInfo::kIdX86) ? Type::kIdU32 : Type::kIdU64;
+ uint32_t ptrRegType = (archId == ArchInfo::kIdX86) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
+
+ _saVarId = uint8_t(varId);
+ _hasPreservedFP = frame.hasPreservedFP();
+
+ Var& var = _vars[varId];
+ var.reset();
+
+ if (saCurRegId == BaseReg::kIdBad) {
+ if (saOutRegId != BaseReg::kIdBad && !gpRegs.isAssigned(saOutRegId)) {
+ saCurRegId = saOutRegId;
+ }
+ else {
+ uint32_t availableRegs = gpRegs.availableRegs();
+ if (!availableRegs)
+ availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
+
+ if (ASMJIT_UNLIKELY(!availableRegs))
+ return DebugUtils::errored(kErrorNoMorePhysRegs);
+
+ saCurRegId = Support::ctz(availableRegs);
+ }
+ }
+
+ var.cur.initReg(ptrRegType, saCurRegId, ptrTypeId);
+ gpRegs.assign(varId, saCurRegId);
+ gpRegs._workRegs |= Support::bitMask(saCurRegId);
+
+ if (saOutRegId != BaseReg::kIdBad) {
+ var.out.initReg(ptrRegType, saOutRegId, ptrTypeId);
+ gpRegs._dstRegs |= Support::bitMask(saOutRegId);
+ gpRegs._workRegs |= Support::bitMask(saOutRegId);
+ }
+ else {
+ var.markDone();
+ }
+
+ varId++;
+ }
+
+ _varCount = varId;
+
+ // Detect register swaps.
+ for (varId = 0; varId < _varCount; varId++) {
+ Var& var = _vars[varId];
+ if (var.cur.isReg() && var.out.isReg()) {
+ uint32_t srcId = var.cur.regId();
+ uint32_t dstId = var.out.regId();
+
+ uint32_t group = Reg::groupOf(var.cur.regType());
+ if (group != Reg::groupOf(var.out.regType()))
+ continue;
+
+ WorkData& wd = _workData[group];
+ if (wd.isAssigned(dstId)) {
+ Var& other = _vars[wd._physToVarId[dstId]];
+ if (Reg::groupOf(other.out.regType()) == group && other.out.regId() == srcId) {
+ wd._numSwaps++;
+ _regSwapsMask = uint8_t(_regSwapsMask | Support::bitMask(group));
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
+ for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++) {
+ WorkData& wd = _workData[i];
+ uint32_t regs = wd.usedRegs() | wd._dstShuf;
+
+ wd._workRegs |= regs;
+ frame.addDirtyRegs(i, regs);
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::markScratchRegs(FuncFrame& frame) noexcept {
+ uint32_t groupMask = 0;
+
+ // Handle stack to stack moves.
+ groupMask |= _stackDstMask;
+
+ // Handle register swaps.
+ groupMask |= _regSwapsMask & ~Support::bitMask(BaseReg::kGroupGp);
+
+ if (!groupMask)
+ return kErrorOk;
+
+ // Select one dirty register per affected group that can be used as a scratch register.
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ if (Support::bitTest(groupMask, group)) {
+ WorkData& wd = _workData[group];
+
+ // Initially, pick some clobbered or dirty register.
+ uint32_t workRegs = wd.workRegs();
+ uint32_t regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
+
+ // If that didn't work out, pick some register which is not in 'used'.
+ if (!regs) regs = workRegs & ~wd.usedRegs();
+
+ // If that didn't work out, pick any other register that is allocable.
+ // This last-resort case will, however, result in marking one more
+ // register dirty.
+ if (!regs) regs = wd.archRegs() & ~workRegs;
+
+ // If that didn't work out, we will have to use XORs instead of MOVs.
+ if (!regs) continue;
+
+ uint32_t regMask = Support::blsi(regs);
+ wd._workRegs |= regMask;
+ frame.addDirtyRegs(group, regMask);
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::markStackArgsReg(FuncFrame& frame) noexcept {
+ // TODO: Validate, improve...
+ if (_saVarId != kVarIdNone) {
+ const Var& var = _vars[_saVarId];
+ frame.setSARegId(var.cur.regId());
+ }
+ else if (frame.hasPreservedFP()) {
+ // Always EBP|RBP if the frame-pointer isn't omitted.
+ frame.setSARegId(Gp::kIdBp);
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::X86Internal - FrameLayout]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error X86Internal::initFuncFrame(FuncFrame& frame, const FuncDetail& func) noexcept {
+ uint32_t archId = func.callConv().archId();
+
+ // Initializing FuncFrame means making a copy of some properties of `func`.
+ // Properties like `_localStackSize` will be set by the user before the frame
+ // is finalized.
+ frame.reset();
+
+ frame._archId = uint8_t(archId);
+ frame._spRegId = Gp::kIdSp;
+ frame._saRegId = Gp::kIdBad;
+
+ uint32_t naturalStackAlignment = func.callConv().naturalStackAlignment();
+ uint32_t minDynamicAlignment = Support::max<uint32_t>(naturalStackAlignment, 16);
+
+ if (minDynamicAlignment == naturalStackAlignment)
+ minDynamicAlignment <<= 1;
+
+ frame._naturalStackAlignment = uint8_t(naturalStackAlignment);
+ frame._minDynamicAlignment = uint8_t(minDynamicAlignment);
+ frame._redZoneSize = uint8_t(func.redZoneSize());
+ frame._spillZoneSize = uint8_t(func.spillZoneSize());
+ frame._finalStackAlignment = uint8_t(frame._naturalStackAlignment);
+
+ if (func.hasFlag(CallConv::kFlagCalleePopsStack)) {
+ frame._calleeStackCleanup = uint16_t(func.argStackSize());
+ }
+
+ // Initial masks of dirty and preserved registers.
+ for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
+ frame._dirtyRegs[group] = func.usedRegs(group);
+ frame._preservedRegs[group] = func.preservedRegs(group);
+ }
+
+ // Exclude ESP/RSP - this register is never included in saved GP regs.
+ frame._preservedRegs[BaseReg::kGroupGp] &= ~Support::bitMask(Gp::kIdSp);
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE Error X86Internal::finalizeFuncFrame(FuncFrame& frame) noexcept {
+ uint32_t gpSize = frame.archId() == ArchInfo::kIdX86 ? 4 : 8;
+
+ // The final stack alignment must be updated according to the call and local stack alignments.
+ uint32_t stackAlignment = frame._finalStackAlignment;
+ ASMJIT_ASSERT(stackAlignment == Support::max(frame._naturalStackAlignment,
+ frame._callStackAlignment,
+ frame._localStackAlignment));
+
+ // TODO: Must be configurable.
+ uint32_t vecSize = 16;
+
+ bool hasFP = frame.hasPreservedFP();
+ bool hasDA = frame.hasDynamicAlignment();
+
+ // Include EBP|RBP if the function preserves the frame-pointer.
+ if (hasFP)
+ frame._dirtyRegs[Reg::kGroupGp] |= Support::bitMask(Gp::kIdBp);
+
+ // These two are identical if the function doesn't align its stack dynamically.
+ uint32_t saRegId = frame.saRegId();
+ if (saRegId == BaseReg::kIdBad)
+ saRegId = Gp::kIdSp;
+
+ // Fix stack arguments base-register from ESP|RSP to EBP|RBP in case it was
+ // not picked before and the function performs dynamic stack alignment.
+ if (hasDA && saRegId == Gp::kIdSp)
+ saRegId = Gp::kIdBp;
+
+ // Mark as dirty any register but ESP|RSP if used as SA pointer.
+ if (saRegId != Gp::kIdSp)
+ frame._dirtyRegs[Reg::kGroupGp] |= Support::bitMask(saRegId);
+
+ frame._spRegId = uint8_t(Gp::kIdSp);
+ frame._saRegId = uint8_t(saRegId);
+
+ // Setup stack size used to save preserved registers.
+ frame._gpSaveSize = uint16_t(Support::popcnt(frame.savedRegs(Reg::kGroupGp )) * gpSize);
+ frame._nonGpSaveSize = uint16_t(Support::popcnt(frame.savedRegs(Reg::kGroupVec )) * vecSize +
+ Support::popcnt(frame.savedRegs(Reg::kGroupMm )) * 8 +
+ Support::popcnt(frame.savedRegs(Reg::kGroupKReg)) * 8);
+
+ uint32_t v = 0; // The beginning of the stack frame relative to SP after prolog.
+ v += frame.callStackSize(); // Count 'callStackSize' <- This is used to call functions.
+ v = Support::alignUp(v, stackAlignment); // Align to function's stack alignment.
+
+ frame._localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
+ v += frame.localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
+
+ // If the function's stack must be aligned, calculate the alignment necessary
+ // to store vector registers, and set `FuncFrame::kAttrAlignedVecSR` to inform
+ // PEI that it can use instructions that perform aligned stores/loads.
+ if (stackAlignment >= vecSize && frame._nonGpSaveSize) {
+ frame.addAttributes(FuncFrame::kAttrAlignedVecSR);
+ v = Support::alignUp(v, vecSize); // Align '_nonGpSaveOffset'.
+ }
+
+ frame._nonGpSaveOffset = v; // Store '_nonGpSaveOffset' <- Non-GP Save/Restore starts here.
+ v += frame._nonGpSaveSize; // Count '_nonGpSaveSize' <- Non-GP Save/Restore ends here.
+
+ // Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
+ if (hasDA && !hasFP) {
+ frame._daOffset = v; // Store 'daOffset' <- DA pointer would be stored here.
+ v += gpSize; // Count 'daOffset'.
+ }
+ else {
+ frame._daOffset = FuncFrame::kTagInvalidOffset;
+ }
+
+ // The return address should be stored after GP save/restore regs. It has
+ // the same size as `gpSize` (basically the native register/pointer size).
+ // We don't adjust it yet as `v` currently contains the exact size that the
+ // function requires to adjust (call frame + stack frame, vec stack size).
+ // The stack (if we consider this size) is misaligned now, as it's always
+ // aligned before the function call - when `call()` is executed it pushes
+ // the current EIP|RIP onto the stack, which misaligns it by 12 or 8 bytes
+ // (depending on the architecture). So count the number of bytes needed to
+ // align it up to the function's CallFrame (the beginning).
+ if (v || frame.hasFuncCalls())
+ v += Support::alignUpDiff(v + frame.gpSaveSize() + gpSize, stackAlignment);
+
+ frame._gpSaveOffset = v; // Store 'gpSaveOffset' <- Function's GP Save/Restore starts here.
+ frame._stackAdjustment = v; // Store 'stackAdjustment' <- SA used by 'add zsp, SA' and 'sub zsp, SA'.
+
+ v += frame._gpSaveSize; // Count 'gpSaveSize' <- Function's GP Save/Restore ends here.
+ v += gpSize; // Count 'ReturnAddress' <- As CALL pushes onto stack.
+
+ // If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
+ if (hasDA)
+ frame._stackAdjustment = Support::alignUp(frame._stackAdjustment, stackAlignment);
+
+ uint32_t saInvOff = FuncFrame::kTagInvalidOffset;
+ uint32_t saTmpOff = gpSize + frame._gpSaveSize;
+
+ // Calculate where the function arguments start relative to SP.
+ frame._saOffsetFromSP = hasDA ? saInvOff : v;
+
+ // Calculate where the function arguments start relative to FP or user-provided register.
+ frame._saOffsetFromSA = hasFP ? gpSize * 2 // Return address + frame pointer.
+ : saTmpOff; // Return address + all saved GP regs.
+
+ return kErrorOk;
+}
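+
+// A worked example (a sketch assuming X64, 16-byte natural alignment, no
+// dynamic alignment, no function calls, no local/call stack, and RBX as the
+// only saved GP register): `_gpSaveSize` is 8 and `v` stays 0 through the
+// layout loop, so `_stackAdjustment` is 0 and no 'sub rsp' is needed, while
+// `_saOffsetFromSP` ends up 16 - the saved RBX (8 bytes) plus the return
+// address (8 bytes) - which is exactly where the first stack argument lives
+// after the prolog's 'push rbx'.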
+
+// ============================================================================
+// [asmjit::X86Internal - ArgsToFrameInfo]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error X86Internal::argsToFuncFrame(const FuncArgsAssignment& args, FuncFrame& frame) noexcept {
+ X86FuncArgsContext ctx;
+ ASMJIT_PROPAGATE(ctx.initWorkData(frame, args));
+ ASMJIT_PROPAGATE(ctx.markDstRegsDirty(frame));
+ ASMJIT_PROPAGATE(ctx.markScratchRegs(frame));
+ ASMJIT_PROPAGATE(ctx.markStackArgsReg(frame));
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::X86Internal - Emit Helpers]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error X86Internal::emitRegMove(Emitter* emitter,
+ const Operand_& dst_,
+ const Operand_& src_, uint32_t typeId, bool avxEnabled, const char* comment) {
+
+ // Invalid or abstract TypeIds are not allowed.
+ ASMJIT_ASSERT(Type::isValid(typeId) && !Type::isAbstract(typeId));
+
+ Operand dst(dst_);
+ Operand src(src_);
+
+ uint32_t instId = Inst::kIdNone;
+ uint32_t memFlags = 0;
+ uint32_t overrideMemSize = 0;
+
+ enum MemFlags : uint32_t {
+ kDstMem = 0x1,
+ kSrcMem = 0x2
+ };
+
+ // Detect memory operands and patch them to have the same size as the register.
+ // BaseCompiler always sets the memory size of allocs and spills, so this
+ // shouldn't really be necessary; however, since this function was separated
+ // from Compiler it's better to make sure that the size is always specified,
+ // as we can use 'movzx' and 'movsx' that rely on it.
+ if (dst.isMem()) { memFlags |= kDstMem; dst.as<Mem>().setSize(src.size()); }
+ if (src.isMem()) { memFlags |= kSrcMem; src.as<Mem>().setSize(dst.size()); }
+
+ switch (typeId) {
+ case Type::kIdI8:
+ case Type::kIdU8:
+ case Type::kIdI16:
+ case Type::kIdU16:
+ // Special case - 'movzx' load.
+ if (memFlags & kSrcMem) {
+ instId = Inst::kIdMovzx;
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ }
+ else if (!memFlags) {
+ // Change both destination and source registers to GPD (safer, no dependencies).
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ src.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case Type::kIdI32:
+ case Type::kIdU32:
+ case Type::kIdI64:
+ case Type::kIdU64:
+ instId = Inst::kIdMov;
+ break;
+
+ case Type::kIdMmx32:
+ instId = Inst::kIdMovd;
+ if (memFlags) break;
+ ASMJIT_FALLTHROUGH;
+
+ case Type::kIdMmx64 : instId = Inst::kIdMovq ; break;
+ case Type::kIdMask8 : instId = Inst::kIdKmovb; break;
+ case Type::kIdMask16: instId = Inst::kIdKmovw; break;
+ case Type::kIdMask32: instId = Inst::kIdKmovd; break;
+ case Type::kIdMask64: instId = Inst::kIdKmovq; break;
+
+ default: {
+ uint32_t elementTypeId = Type::baseOf(typeId);
+ if (Type::isVec32(typeId) && memFlags) {
+ overrideMemSize = 4;
+ if (elementTypeId == Type::kIdF32)
+ instId = avxEnabled ? Inst::kIdVmovss : Inst::kIdMovss;
+ else
+ instId = avxEnabled ? Inst::kIdVmovd : Inst::kIdMovd;
+ break;
+ }
+
+ if (Type::isVec64(typeId) && memFlags) {
+ overrideMemSize = 8;
+ if (elementTypeId == Type::kIdF64)
+ instId = avxEnabled ? Inst::kIdVmovsd : Inst::kIdMovsd;
+ else
+ instId = avxEnabled ? Inst::kIdVmovq : Inst::kIdMovq;
+ break;
+ }
+
+ if (elementTypeId == Type::kIdF32)
+ instId = avxEnabled ? Inst::kIdVmovaps : Inst::kIdMovaps;
+ else if (elementTypeId == Type::kIdF64)
+ instId = avxEnabled ? Inst::kIdVmovapd : Inst::kIdMovapd;
+ else if (typeId <= Type::_kIdVec256End)
+ instId = avxEnabled ? Inst::kIdVmovdqa : Inst::kIdMovdqa;
+ else if (elementTypeId <= Type::kIdU32)
+ instId = Inst::kIdVmovdqa32;
+ else
+ instId = Inst::kIdVmovdqa64;
+ break;
+ }
+ }
+
+ if (!instId)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ if (overrideMemSize) {
+ if (dst.isMem()) dst.as<Mem>().setSize(overrideMemSize);
+ if (src.isMem()) src.as<Mem>().setSize(overrideMemSize);
+ }
+
+ emitter->setInlineComment(comment);
+ return emitter->emit(instId, dst, src);
+}
+
+ASMJIT_FAVOR_SIZE Error X86Internal::emitArgMove(Emitter* emitter,
+ const Reg& dst_, uint32_t dstTypeId,
+ const Operand_& src_, uint32_t srcTypeId, bool avxEnabled, const char* comment) {
+
+ // Deduce optional `dstTypeId`, which may be `Type::kIdVoid` in some cases.
+ if (!dstTypeId) dstTypeId = opData.archRegs.regTypeToTypeId[dst_.type()];
+
+ // Invalid or abstract TypeIds are not allowed.
+ ASMJIT_ASSERT(Type::isValid(dstTypeId) && !Type::isAbstract(dstTypeId));
+ ASMJIT_ASSERT(Type::isValid(srcTypeId) && !Type::isAbstract(srcTypeId));
+
+ Reg dst(dst_);
+ Operand src(src_);
+
+ uint32_t dstSize = Type::sizeOf(dstTypeId);
+ uint32_t srcSize = Type::sizeOf(srcTypeId);
+
+ uint32_t instId = Inst::kIdNone;
+
+ // Not a real loop, just 'break' is nicer than 'goto'.
+ for (;;) {
+ if (Type::isInt(dstTypeId)) {
+ if (Type::isInt(srcTypeId)) {
+ instId = Inst::kIdMovsx;
+ uint32_t typeOp = (dstTypeId << 8) | srcTypeId;
+
+ // Sign extend by using 'movsx'.
+ if (typeOp == ((Type::kIdI16 << 8) | Type::kIdI8 ) ||
+ typeOp == ((Type::kIdI32 << 8) | Type::kIdI8 ) ||
+ typeOp == ((Type::kIdI32 << 8) | Type::kIdI16) ||
+ typeOp == ((Type::kIdI64 << 8) | Type::kIdI8 ) ||
+ typeOp == ((Type::kIdI64 << 8) | Type::kIdI16)) break;
+
+ // Sign extend by using 'movsxd'.
+ instId = Inst::kIdMovsxd;
+ if (typeOp == ((Type::kIdI64 << 8) | Type::kIdI32)) break;
+ }
+
+ if (Type::isInt(srcTypeId) || src_.isMem()) {
+ // Zero extend by using 'movzx' or 'mov'.
+ if (dstSize <= 4 && srcSize < 4) {
+ instId = Inst::kIdMovzx;
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ }
+ else {
+ // We should have caught all possibilities where `srcSize` is less
+ // than 4, so we don't have to worry about 'movzx' anymore. Minimum
+ // size is enough to determine if we want 32-bit or 64-bit move.
+ instId = Inst::kIdMov;
+ srcSize = Support::min(srcSize, dstSize);
+
+ dst.setSignature(srcSize == 4 ? Reg::signatureOfT<Reg::kTypeGpd>()
+ : Reg::signatureOfT<Reg::kTypeGpq>());
+ if (src.isReg()) src.setSignature(dst.signature());
+ }
+ break;
+ }
+
+ // NOTE: The previous branch caught all memory sources; from here it's
+ // always a register to register conversion, so catch the remaining cases.
+ srcSize = Support::min(srcSize, dstSize);
+
+ if (Type::isMmx(srcTypeId)) {
+ // 64-bit move.
+ instId = Inst::kIdMovq;
+ if (srcSize == 8) break;
+
+ // 32-bit move.
+ instId = Inst::kIdMovd;
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ break;
+ }
+
+ if (Type::isMask(srcTypeId)) {
+ instId = x86KmovFromSize(srcSize);
+ dst.setSignature(srcSize <= 4 ? Reg::signatureOfT<Reg::kTypeGpd>()
+ : Reg::signatureOfT<Reg::kTypeGpq>());
+ break;
+ }
+
+ if (Type::isVec(srcTypeId)) {
+ // 64-bit move.
+ instId = avxEnabled ? Inst::kIdVmovq : Inst::kIdMovq;
+ if (srcSize == 8) break;
+
+ // 32-bit move.
+ instId = avxEnabled ? Inst::kIdVmovd : Inst::kIdMovd;
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ break;
+ }
+ }
+
+ if (Type::isMmx(dstTypeId)) {
+ instId = Inst::kIdMovq;
+ srcSize = Support::min(srcSize, dstSize);
+
+ if (Type::isInt(srcTypeId) || src.isMem()) {
+ // 64-bit move.
+ if (srcSize == 8) break;
+
+ // 32-bit move.
+ instId = Inst::kIdMovd;
+ if (src.isReg()) src.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ break;
+ }
+
+ if (Type::isMmx(srcTypeId)) break;
+
+ // This will hurt if `avxEnabled`.
+ instId = Inst::kIdMovdq2q;
+ if (Type::isVec(srcTypeId)) break;
+ }
+
+ if (Type::isMask(dstTypeId)) {
+ srcSize = Support::min(srcSize, dstSize);
+
+ if (Type::isInt(srcTypeId) || Type::isMask(srcTypeId) || src.isMem()) {
+ instId = x86KmovFromSize(srcSize);
+ if (Reg::isGp(src) && srcSize <= 4) src.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ break;
+ }
+ }
+
+ if (Type::isVec(dstTypeId)) {
+ // By default set the destination to XMM; it will be switched to YMM|ZMM if needed.
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeXmm>());
+
+ // This will hurt if `avxEnabled`.
+ if (Reg::isMm(src)) {
+ // 64-bit move.
+ instId = Inst::kIdMovq2dq;
+ break;
+ }
+
+ // Argument conversion.
+ uint32_t dstElement = Type::baseOf(dstTypeId);
+ uint32_t srcElement = Type::baseOf(srcTypeId);
+
+ if (dstElement == Type::kIdF32 && srcElement == Type::kIdF64) {
+ srcSize = Support::min(dstSize * 2, srcSize);
+ dstSize = srcSize / 2;
+
+ if (srcSize <= 8)
+ instId = avxEnabled ? Inst::kIdVcvtss2sd : Inst::kIdCvtss2sd;
+ else
+ instId = avxEnabled ? Inst::kIdVcvtps2pd : Inst::kIdCvtps2pd;
+
+ if (dstSize == 32)
+ dst.setSignature(Reg::signatureOfT<Reg::kTypeYmm>());
+ if (src.isReg())
+ src.setSignature(Reg::signatureOfVecBySize(srcSize));
+ break;
+ }
+
+ if (dstElement == Type::kIdF64 && srcElement == Type::kIdF32) {
+ srcSize = Support::min(dstSize, srcSize * 2) / 2;
+ dstSize = srcSize * 2;
+
+ if (srcSize <= 4)
+ instId = avxEnabled ? Inst::kIdVcvtsd2ss : Inst::kIdCvtsd2ss;
+ else
+ instId = avxEnabled ? Inst::kIdVcvtpd2ps : Inst::kIdCvtpd2ps;
+
+ dst.setSignature(Reg::signatureOfVecBySize(dstSize));
+ if (src.isReg() && srcSize >= 32)
+ src.setSignature(Reg::signatureOfT<Reg::kTypeYmm>());
+ break;
+ }
+
+ srcSize = Support::min(srcSize, dstSize);
+ if (Reg::isGp(src) || src.isMem()) {
+ // 32-bit move.
+ if (srcSize <= 4) {
+ instId = avxEnabled ? Inst::kIdVmovd : Inst::kIdMovd;
+ if (src.isReg()) src.setSignature(Reg::signatureOfT<Reg::kTypeGpd>());
+ break;
+ }
+
+ // 64-bit move.
+ if (srcSize == 8) {
+ instId = avxEnabled ? Inst::kIdVmovq : Inst::kIdMovq;
+ break;
+ }
+ }
+
+ if (Reg::isVec(src) || src.isMem()) {
+ instId = avxEnabled ? Inst::kIdVmovaps : Inst::kIdMovaps;
+ uint32_t sign = Reg::signatureOfVecBySize(srcSize);
+
+ dst.setSignature(sign);
+ if (src.isReg()) src.setSignature(sign);
+ break;
+ }
+ }
+
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ if (src.isMem())
+ src.as<Mem>().setSize(srcSize);
+
+ emitter->setInlineComment(comment);
+ return emitter->emit(instId, dst, src);
+}
+
+// ============================================================================
+// [asmjit::X86Internal - Emit Prolog & Epilog]
+// ============================================================================
+
+static ASMJIT_INLINE void X86Internal_setupSaveRestoreInfo(uint32_t group, const FuncFrame& frame, Reg& xReg, uint32_t& xInst, uint32_t& xSize) noexcept {
+ switch (group) {
+ case Reg::kGroupVec:
+ xReg = xmm(0);
+ xInst = x86GetXmmMovInst(frame);
+ xSize = xReg.size();
+ break;
+ case Reg::kGroupMm:
+ xReg = mm(0);
+ xInst = Inst::kIdMovq;
+ xSize = xReg.size();
+ break;
+ case Reg::kGroupKReg:
+ xReg = k(0);
+ xInst = Inst::kIdKmovq;
+ xSize = xReg.size();
+ break;
+ }
+}
+
+ASMJIT_FAVOR_SIZE Error X86Internal::emitProlog(Emitter* emitter, const FuncFrame& frame) {
+ uint32_t gpSaved = frame.savedRegs(Reg::kGroupGp);
+
+ Gp zsp = emitter->zsp(); // ESP|RSP register.
+ Gp zbp = emitter->zbp(); // EBP|RBP register.
+ Gp gpReg = zsp; // General purpose register (temporary).
+ Gp saReg = zsp; // Stack-arguments base pointer.
+
+ // Emit: 'push zbp'
+ // 'mov zbp, zsp'.
+ if (frame.hasPreservedFP()) {
+ gpSaved &= ~Support::bitMask(Gp::kIdBp);
+ ASMJIT_PROPAGATE(emitter->push(zbp));
+ ASMJIT_PROPAGATE(emitter->mov(zbp, zsp));
+ }
+
+ // Emit: 'push gp' sequence.
+ {
+ Support::BitWordIterator<uint32_t> it(gpSaved);
+ while (it.hasNext()) {
+ gpReg.setId(it.next());
+ ASMJIT_PROPAGATE(emitter->push(gpReg));
+ }
+ }
+
+ // Emit: 'mov saReg, zsp'.
+ uint32_t saRegId = frame.saRegId();
+ if (saRegId != BaseReg::kIdBad && saRegId != Gp::kIdSp) {
+ saReg.setId(saRegId);
+ if (frame.hasPreservedFP()) {
+ if (saRegId != Gp::kIdBp)
+ ASMJIT_PROPAGATE(emitter->mov(saReg, zbp));
+ }
+ else {
+ ASMJIT_PROPAGATE(emitter->mov(saReg, zsp));
+ }
+ }
+
+ // Emit: 'and zsp, StackAlignment'.
+ if (frame.hasDynamicAlignment()) {
+ ASMJIT_PROPAGATE(emitter->and_(zsp, -int32_t(frame.finalStackAlignment())));
+ }
+
+ // Emit: 'sub zsp, StackAdjustment'.
+ if (frame.hasStackAdjustment()) {
+ ASMJIT_PROPAGATE(emitter->sub(zsp, frame.stackAdjustment()));
+ }
+
+ // Emit: 'mov [zsp + DAOffset], saReg'.
+ if (frame.hasDynamicAlignment() && frame.hasDAOffset()) {
+ Mem saMem = ptr(zsp, int32_t(frame.daOffset()));
+ ASMJIT_PROPAGATE(emitter->mov(saMem, saReg));
+ }
+
+ // Emit 'movxxx [zsp + X], {[x|y|z]mm, k}'.
+ {
+ Reg xReg;
+ Mem xBase = ptr(zsp, int32_t(frame.nonGpSaveOffset()));
+
+ uint32_t xInst;
+ uint32_t xSize;
+
+ for (uint32_t group = 1; group < BaseReg::kGroupVirt; group++) {
+ Support::BitWordIterator<uint32_t> it(frame.savedRegs(group));
+ if (it.hasNext()) {
+ X86Internal_setupSaveRestoreInfo(group, frame, xReg, xInst, xSize);
+ do {
+ xReg.setId(it.next());
+ ASMJIT_PROPAGATE(emitter->emit(xInst, xBase, xReg));
+ xBase.addOffsetLo32(int32_t(xSize));
+ } while (it.hasNext());
+ }
+ }
+ }
+
+ return kErrorOk;
+}
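+
+// As a sketch, for a frame that preserves the frame pointer and saves RBX
+// and XMM6, the emitted prolog would look roughly like this (X64, with
+// offsets and the aligned/unaligned vector move depending on the finalized
+// frame):
+//
+//   push rbp
+//   mov rbp, rsp
+//   push rbx
+//   sub rsp, <stackAdjustment>
+//   movaps [rsp + <nonGpSaveOffset>], xmm6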
+
+ASMJIT_FAVOR_SIZE Error X86Internal::emitEpilog(Emitter* emitter, const FuncFrame& frame) {
+ uint32_t i;
+ uint32_t regId;
+
+ uint32_t gpSize = emitter->gpSize();
+ uint32_t gpSaved = frame.savedRegs(Reg::kGroupGp);
+
+ Gp zsp = emitter->zsp(); // ESP|RSP register.
+ Gp zbp = emitter->zbp(); // EBP|RBP register.
+ Gp gpReg = emitter->zsp(); // General purpose register (temporary).
+
+ // Don't emit 'pop zbp' in the pop sequence; this case is handled separately.
+ if (frame.hasPreservedFP())
+ gpSaved &= ~Support::bitMask(Gp::kIdBp);
+
+ // Emit 'movxxx {[x|y|z]mm, k}, [zsp + X]'.
+ {
+ Reg xReg;
+ Mem xBase = ptr(zsp, int32_t(frame.nonGpSaveOffset()));
+
+ uint32_t xInst;
+ uint32_t xSize;
+
+ for (uint32_t group = 1; group < BaseReg::kGroupVirt; group++) {
+ Support::BitWordIterator<uint32_t> it(frame.savedRegs(group));
+ if (it.hasNext()) {
+ X86Internal_setupSaveRestoreInfo(group, frame, xReg, xInst, xSize);
+ do {
+ xReg.setId(it.next());
+ ASMJIT_PROPAGATE(emitter->emit(xInst, xReg, xBase));
+ xBase.addOffsetLo32(int32_t(xSize));
+ } while (it.hasNext());
+ }
+ }
+ }
+
+ // Emit 'emms' and/or 'vzeroupper'.
+ if (frame.hasMmxCleanup()) ASMJIT_PROPAGATE(emitter->emms());
+ if (frame.hasAvxCleanup()) ASMJIT_PROPAGATE(emitter->vzeroupper());
+
+ if (frame.hasPreservedFP()) {
+ // Emit 'mov zsp, zbp' or 'lea zsp, [zbp - x]'
+ int32_t count = int32_t(frame.gpSaveSize() - gpSize);
+ if (!count)
+ ASMJIT_PROPAGATE(emitter->mov(zsp, zbp));
+ else
+ ASMJIT_PROPAGATE(emitter->lea(zsp, ptr(zbp, -count)));
+ }
+ else {
+ if (frame.hasDynamicAlignment() && frame.hasDAOffset()) {
+ // Emit 'mov zsp, [zsp + DsaSlot]'.
+ Mem saMem = ptr(zsp, int32_t(frame.daOffset()));
+ ASMJIT_PROPAGATE(emitter->mov(zsp, saMem));
+ }
+ else if (frame.hasStackAdjustment()) {
+ // Emit 'add zsp, StackAdjustment'.
+ ASMJIT_PROPAGATE(emitter->add(zsp, int32_t(frame.stackAdjustment())));
+ }
+ }
+
+ // Emit 'pop gp' sequence.
+ if (gpSaved) {
+ i = gpSaved;
+ regId = 16;
+
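+ // Scan from the highest register ID downwards so registers are popped
+ // in the reverse order of the prolog's push sequence.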
+ do {
+ regId--;
+ if (i & 0x8000) {
+ gpReg.setId(regId);
+ ASMJIT_PROPAGATE(emitter->pop(gpReg));
+ }
+ i <<= 1;
+ } while (regId != 0);
+ }
+
+ // Emit 'pop zbp'.
+ if (frame.hasPreservedFP())
+ ASMJIT_PROPAGATE(emitter->pop(zbp));
+
+ // Emit 'ret' or 'ret x'.
+ if (frame.hasCalleeStackCleanup())
+ ASMJIT_PROPAGATE(emitter->emit(Inst::kIdRet, int(frame.calleeStackCleanup())));
+ else
+ ASMJIT_PROPAGATE(emitter->emit(Inst::kIdRet));
+
+ return kErrorOk;
+}
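+
+// The matching epilog for the prolog sketched above would be roughly:
+//
+//   movaps xmm6, [rsp + <nonGpSaveOffset>]
+//   lea rsp, [rbp - 8]          ; 'mov rsp, rbp' when no GP regs besides RBP
+//   pop rbx
+//   pop rbp
+//   ret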
+
+// ============================================================================
+// [asmjit::X86Internal - Emit Arguments Assignment]
+// ============================================================================
+
+#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
+static void dumpFuncValue(String& sb, uint32_t archId, const FuncValue& value) noexcept {
+ Logging::formatTypeId(sb, value.typeId());
+ sb.appendChar('@');
+ if (value.isReg()) {
+ Logging::formatRegister(sb, 0, nullptr, archId, value.regType(), value.regId());
+ }
+ else if (value.isStack()) {
+ sb.appendFormat("[%d]", value.stackOffset());
+ }
+ else {
+ sb.appendString("<none>");
+ }
+}
+
+static void dumpAssignment(String& sb, const X86FuncArgsContext& ctx) noexcept {
+ typedef X86FuncArgsContext::Var Var;
+
+ uint32_t archId = ctx.archId();
+ uint32_t varCount = ctx.varCount();
+
+ for (uint32_t i = 0; i < varCount; i++) {
+ const Var& var = ctx.var(i);
+ const FuncValue& dst = var.out;
+ const FuncValue& cur = var.cur;
+
+ sb.appendFormat("Var%u: ", i);
+ dumpFuncValue(sb, archId, dst);
+ sb.appendString(" <- ");
+ dumpFuncValue(sb, archId, cur);
+
+ if (var.isDone())
+ sb.appendString(" {Done}");
+
+ sb.appendChar('\n');
+ }
+}
+#endif
+
+ASMJIT_FAVOR_SIZE Error X86Internal::emitArgsAssignment(Emitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
+ typedef X86FuncArgsContext::Var Var;
+ typedef X86FuncArgsContext::WorkData WorkData;
+
+ enum WorkFlags : uint32_t {
+ kWorkNone = 0x00,
+ kWorkDidSome = 0x01,
+ kWorkPending = 0x02,
+ kWorkPostponed = 0x04
+ };
+
+ X86FuncArgsContext ctx;
+ ASMJIT_PROPAGATE(ctx.initWorkData(frame, args));
+
+#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
+ {
+ String sb;
+ dumpAssignment(sb, ctx);
+ printf("%s\n", sb.data());
+ }
+#endif
+
+ uint32_t archId = ctx.archId();
+ uint32_t varCount = ctx._varCount;
+ WorkData* workData = ctx._workData;
+
+ // Use AVX if it's enabled.
+ bool avxEnabled = frame.isAvxEnabled();
+
+ uint32_t saVarId = ctx._saVarId;
+ uint32_t saRegId = Gp::kIdSp;
+
+ if (frame.hasDynamicAlignment()) {
+ if (frame.hasPreservedFP())
+ saRegId = Gp::kIdBp;
+ else
+ saRegId = saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId();
+ }
+
+ // --------------------------------------------------------------------------
+ // Register to stack and stack to stack moves must be done first, as this is
+ // when we have the greatest chance of having as many unassigned registers
+ // as possible.
+ // --------------------------------------------------------------------------
+
+ if (ctx._stackDstMask) {
+ // Base address of all arguments passed by stack.
+ Mem baseArgPtr = ptr(emitter->gpz(saRegId), int32_t(frame.saOffset(saRegId)));
+ Mem baseStackPtr = ptr(emitter->gpz(Gp::kIdSp), int32_t(0));
+
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+ if (!var.out.isStack()) continue;
+
+ ASMJIT_ASSERT(var.cur.isReg() || var.cur.isStack());
+ Reg reg;
+
+ if (var.cur.isReg()) {
+ WorkData& wd = workData[Reg::groupOf(var.cur.regType())];
+ uint32_t rId = var.cur.regId();
+
+ reg.setSignatureAndId(Reg::signatureOf(var.cur.regType()), rId);
+ wd.unassign(varId, rId);
+ }
+ else {
+ // Stack to stack move - since the value goes through a temporary register
+ // anyway, we can decide which register to use. In general we follow the
+ // rule that IntToInt moves use GP regs (with the possibility to sign or
+ // zero extend), and all other moves use either GP or VEC regs depending
+ // on the size of the move.
+ RegInfo rInfo = x86GetRegForMemToMemMove(archId, var.out.typeId(), var.cur.typeId());
+ if (ASMJIT_UNLIKELY(!rInfo.isValid()))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ WorkData& wd = workData[rInfo.group()];
+ uint32_t availableRegs = wd.availableRegs();
+ if (ASMJIT_UNLIKELY(!availableRegs))
+ return DebugUtils::errored(kErrorInvalidState);
+
+ uint32_t rId = Support::ctz(availableRegs);
+ reg.setSignatureAndId(rInfo.signature(), rId);
+
+ ASMJIT_PROPAGATE(
+ emitArgMove(emitter,
+ reg,
+ var.out.typeId(),
+ baseArgPtr.cloneAdjusted(var.cur.stackOffset()),
+ var.cur.typeId(),
+ avxEnabled));
+ }
+
+ // Register to stack move.
+ ASMJIT_PROPAGATE(
+ emitRegMove(emitter, baseStackPtr.cloneAdjusted(var.out.stackOffset()), reg, var.cur.typeId(), avxEnabled));
+
+ var.markDone();
+ }
+ }
+
+ // --------------------------------------------------------------------------
+ // Shuffle all registers that are currently assigned according to the assignment.
+ // --------------------------------------------------------------------------
+
+ uint32_t workFlags = kWorkNone;
+ for (;;) {
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+ if (var.isDone() || !var.cur.isReg()) continue;
+
+ uint32_t curType = var.cur.regType();
+ uint32_t outType = var.out.regType();
+
+ uint32_t curGroup = Reg::groupOf(curType);
+ uint32_t outGroup = Reg::groupOf(outType);
+
+ uint32_t curId = var.cur.regId();
+ uint32_t outId = var.out.regId();
+
+ if (curGroup != outGroup) {
+ ASMJIT_ASSERT(false);
+
+ // Requires a conversion between two register groups.
+ if (workData[outGroup]._numSwaps) {
+ // TODO: Postponed
+ workFlags |= kWorkPending;
+ }
+ else {
+ // TODO:
+ workFlags |= kWorkPending;
+ }
+ }
+ else {
+ WorkData& wd = workData[outGroup];
+ if (!wd.isAssigned(outId)) {
+EmitMove:
+ ASMJIT_PROPAGATE(
+ emitArgMove(emitter,
+ Reg::fromTypeAndId(outType, outId), var.out.typeId(),
+ Reg::fromTypeAndId(curType, curId), var.cur.typeId(), avxEnabled));
+
+ wd.reassign(varId, outId, curId);
+ var.cur.initReg(outType, outId, var.out.typeId());
+
+ if (outId == var.out.regId())
+ var.markDone();
+ workFlags |= kWorkDidSome | kWorkPending;
+ }
+ else {
+ uint32_t altId = wd._physToVarId[outId];
+ Var& altVar = ctx._vars[altId];
+
+ if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
+ // Swap operation is possible only between two GP registers.
+ if (curGroup == Reg::kGroupGp) {
+ uint32_t highestType = Support::max(var.cur.regType(), altVar.cur.regType());
+ uint32_t signature = highestType == Reg::kTypeGpq ? Reg::signatureOfT<Reg::kTypeGpq>()
+ : Reg::signatureOfT<Reg::kTypeGpd>();
+
+ ASMJIT_PROPAGATE(emitter->emit(Inst::kIdXchg, Reg(signature, outId), Reg(signature, curId)));
+ wd.swap(varId, curId, altId, outId);
+ var.cur.setRegId(outId);
+ var.markDone();
+ altVar.cur.setRegId(curId);
+
+ if (altVar.out.isInitialized())
+ altVar.markDone();
+ workFlags |= kWorkDidSome;
+ }
+ else {
+ // If there is a scratch register it can be used to perform the swap.
+ uint32_t availableRegs = wd.availableRegs();
+ if (availableRegs) {
+ uint32_t inOutRegs = wd.dstRegs();
+ if (availableRegs & ~inOutRegs)
+ availableRegs &= ~inOutRegs;
+ outId = Support::ctz(availableRegs);
+ goto EmitMove;
+ }
+ else {
+ workFlags |= kWorkPending;
+ }
+ }
+ }
+ else {
+ workFlags |= kWorkPending;
+ }
+ }
+ }
+ }
+
+ if (!(workFlags & kWorkPending))
+ break;
+
+ // If we made no progress in two consecutive iterations, something is really broken.
+ if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed)
+ return DebugUtils::errored(kErrorInvalidState);
+
+ workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
+ }
+
+ // --------------------------------------------------------------------------
+ // Load arguments passed by stack into registers. This is pretty simple and,
+ // unlike the previous phase, never requires an open-ended number of
+ // iterations.
+ // --------------------------------------------------------------------------
+
+ if (ctx._hasStackSrc) {
+ uint32_t iterCount = 1;
+ if (frame.hasDynamicAlignment() && !frame.hasPreservedFP())
+ saRegId = saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId();
+
+ // Base address of all arguments passed by stack.
+ Mem baseArgPtr = ptr(emitter->gpz(saRegId), int32_t(frame.saOffset(saRegId)));
+
+ for (uint32_t iter = 0; iter < iterCount; iter++) {
+ for (uint32_t varId = 0; varId < varCount; varId++) {
+ Var& var = ctx._vars[varId];
+ if (var.isDone()) continue;
+
+ if (var.cur.isStack()) {
+ ASMJIT_ASSERT(var.out.isReg());
+
+ uint32_t outId = var.out.regId();
+ uint32_t outType = var.out.regType();
+
+ uint32_t group = Reg::groupOf(outType);
+ WorkData& wd = ctx._workData[group];
+
+ if (outId == saRegId && group == BaseReg::kGroupGp) {
+ // This register will be processed last as we still need `saRegId`.
+ if (iterCount == 1) {
+ iterCount++;
+ continue;
+ }
+ wd.unassign(wd._physToVarId[outId], outId);
+ }
+
+ Reg dstReg = Reg::fromTypeAndId(outType, outId);
+ Mem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
+
+ ASMJIT_PROPAGATE(
+ emitArgMove(emitter,
+ dstReg, var.out.typeId(),
+ srcMem, var.cur.typeId(), avxEnabled));
+
+ wd.assign(varId, outId);
+ var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86internal_p.h b/3rdparty/asmjit/src/asmjit/x86/x86internal_p.h
new file mode 100644
index 00000000000..a1b76963f2a
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86internal_p.h
@@ -0,0 +1,87 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86INTERNAL_P_H_INCLUDED
+#define ASMJIT_X86_X86INTERNAL_P_H_INCLUDED
+
+#include "../core/api-config.h"
+
+#include "../core/func.h"
+#include "../x86/x86emitter.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::X86Internal]
+// ============================================================================
+
+//! X86 utilities used at multiple places, not part of public API, not exported.
+struct X86Internal {
+ //! Initialize `FuncDetail` (X86 specific).
+ static Error initFuncDetail(FuncDetail& func, const FuncSignature& sign, uint32_t gpSize) noexcept;
+
+ //! Initialize `FuncFrame` (X86 specific).
+ static Error initFuncFrame(FuncFrame& frame, const FuncDetail& func) noexcept;
+
+ //! Finalize `FuncFrame` (X86 specific).
+ static Error finalizeFuncFrame(FuncFrame& frame) noexcept;
+
+ static Error argsToFuncFrame(const FuncArgsAssignment& args, FuncFrame& frame) noexcept;
+
+ //! Emit function prolog.
+ static Error emitProlog(Emitter* emitter, const FuncFrame& frame);
+
+ //! Emit function epilog.
+ static Error emitEpilog(Emitter* emitter, const FuncFrame& frame);
+
+ //! Emit a pure move operation between two registers of the same type or
+ //! between a register and its home slot. This function does not handle
+ //! register conversion.
+ static Error emitRegMove(Emitter* emitter,
+ const Operand_& dst_,
+ const Operand_& src_, uint32_t typeId, bool avxEnabled, const char* comment = nullptr);
+
+ //! Emit move from a function argument (either register or stack) to a register.
+ //!
+ //! This function can handle the necessary conversion from one argument to
+ //! another, and from one register type to another, if it's possible. Any
+ //! conversion that would require a third register of a different group
+ //! (for example a conversion from K to MMX) will fail.
+ static Error emitArgMove(Emitter* emitter,
+ const Reg& dst_, uint32_t dstTypeId,
+ const Operand_& src_, uint32_t srcTypeId, bool avxEnabled, const char* comment = nullptr);
+
+ static Error emitArgsAssignment(Emitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args);
+};
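+
+// A rough usage sketch (an assumption based on how these helpers are wired
+// into the public emitter API, not a verbatim snippet from this codebase):
+// the functions above back `BaseEmitter::emitProlog()`, `emitArgsAssignment()`
+// and `emitEpilog()`, and are typically driven in this order:
+//
+//   FuncDetail func;
+//   func.init(FuncSignatureT<int, int, int>(CallConv::kIdHost)); // -> initFuncDetail().
+//
+//   FuncFrame frame;
+//   frame.init(func);                         // -> initFuncFrame().
+//
+//   FuncArgsAssignment args(&func);
+//   args.assignAll(x86::edi, x86::esi);       // Where each argument should land.
+//   args.updateFuncFrame(frame);              // -> argsToFuncFrame().
+//   frame.finalize();                         // -> finalizeFuncFrame().
+//
+//   emitter->emitProlog(frame);               // -> emitProlog().
+//   emitter->emitArgsAssignment(frame, args); // -> emitArgsAssignment().
+//   emitter->emitEpilog(frame);               // -> emitEpilog().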
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86INTERNAL_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86logging.cpp b/3rdparty/asmjit/src/asmjit/x86/x86logging.cpp
new file mode 100644
index 00000000000..cfb91dbf596
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86logging.cpp
@@ -0,0 +1,781 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifndef ASMJIT_NO_LOGGING
+
+#include "../core/misc_p.h"
+#include "../core/support.h"
+#include "../x86/x86instdb_p.h"
+#include "../x86/x86logging_p.h"
+#include "../x86/x86operand.h"
+
+#ifndef ASMJIT_NO_COMPILER
+ #include "../core/compiler.h"
+#endif
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal - Constants]
+// ============================================================================
+
+struct RegFormatInfo {
+ struct TypeEntry {
+ uint8_t index;
+ };
+
+ struct NameEntry {
+ uint8_t count;
+ uint8_t formatIndex;
+ uint8_t specialIndex;
+ uint8_t specialCount;
+ };
+
+ TypeEntry typeEntries[BaseReg::kTypeMax + 1];
+ char typeStrings[128 - 32];
+
+ NameEntry nameEntries[BaseReg::kTypeMax + 1];
+ char nameStrings[280];
+};
+
+template<uint32_t X>
+struct RegFormatInfo_T {
+ enum {
+ kTypeIndex = X == Reg::kTypeGpbLo ? 1 :
+ X == Reg::kTypeGpbHi ? 8 :
+ X == Reg::kTypeGpw ? 15 :
+ X == Reg::kTypeGpd ? 19 :
+ X == Reg::kTypeGpq ? 23 :
+ X == Reg::kTypeXmm ? 27 :
+ X == Reg::kTypeYmm ? 31 :
+ X == Reg::kTypeZmm ? 35 :
+ X == Reg::kTypeMm ? 50 :
+ X == Reg::kTypeKReg ? 53 :
+ X == Reg::kTypeSReg ? 43 :
+ X == Reg::kTypeCReg ? 59 :
+ X == Reg::kTypeDReg ? 62 :
+ X == Reg::kTypeSt ? 47 :
+ X == Reg::kTypeBnd ? 55 :
+ X == Reg::kTypeRip ? 39 : 0,
+
+ kFormatIndex = X == Reg::kTypeGpbLo ? 1 :
+ X == Reg::kTypeGpbHi ? 6 :
+ X == Reg::kTypeGpw ? 11 :
+ X == Reg::kTypeGpd ? 16 :
+ X == Reg::kTypeGpq ? 21 :
+ X == Reg::kTypeXmm ? 25 :
+ X == Reg::kTypeYmm ? 31 :
+ X == Reg::kTypeZmm ? 37 :
+ X == Reg::kTypeMm ? 60 :
+ X == Reg::kTypeKReg ? 65 :
+ X == Reg::kTypeSReg ? 49 :
+ X == Reg::kTypeCReg ? 75 :
+ X == Reg::kTypeDReg ? 80 :
+ X == Reg::kTypeSt ? 55 :
+ X == Reg::kTypeBnd ? 69 :
+ X == Reg::kTypeRip ? 43 : 0,
+
+ kSpecialIndex = X == Reg::kTypeGpbLo ? 96 :
+ X == Reg::kTypeGpbHi ? 128 :
+ X == Reg::kTypeGpw ? 161 :
+ X == Reg::kTypeGpd ? 160 :
+ X == Reg::kTypeGpq ? 192 :
+ X == Reg::kTypeSReg ? 224 :
+ X == Reg::kTypeRip ? 85 : 0,
+
+ kSpecialCount = X == Reg::kTypeGpbLo ? 8 :
+ X == Reg::kTypeGpbHi ? 4 :
+ X == Reg::kTypeGpw ? 8 :
+ X == Reg::kTypeGpd ? 8 :
+ X == Reg::kTypeGpq ? 8 :
+ X == Reg::kTypeSReg ? 7 :
+ X == Reg::kTypeRip ? 1 : 0
+ };
+};
+
+#define ASMJIT_REG_TYPE_ENTRY(TYPE) { \
+ RegFormatInfo_T<TYPE>::kTypeIndex \
+}
+
+#define ASMJIT_REG_NAME_ENTRY(TYPE) { \
+ RegTraits<TYPE>::kCount, \
+ RegFormatInfo_T<TYPE>::kFormatIndex, \
+ RegFormatInfo_T<TYPE>::kSpecialIndex, \
+ RegFormatInfo_T<TYPE>::kSpecialCount \
+}
+
+static const RegFormatInfo x86RegFormatInfo = {
+ // Register type entries and strings.
+ { ASMJIT_LOOKUP_TABLE_32(ASMJIT_REG_TYPE_ENTRY, 0) },
+
+ "\0" // #0
+ "gpb\0\0\0\0" // #1
+ "gpb.hi\0" // #8
+ "gpw\0" // #15
+ "gpd\0" // #19
+ "gpq\0" // #23
+ "xmm\0" // #27
+ "ymm\0" // #31
+ "zmm\0" // #35
+ "rip\0" // #39
+ "seg\0" // #43
+ "st\0" // #47
+ "mm\0" // #50
+ "k\0" // #53
+ "bnd\0" // #55
+ "cr\0" // #59
+ "dr\0", // #62
+
+ // Register name entries and strings.
+ { ASMJIT_LOOKUP_TABLE_32(ASMJIT_REG_NAME_ENTRY, 0) },
+
+ "\0"
+ "r%ub\0" // #1
+ "r%uh\0" // #6
+ "r%uw\0" // #11
+ "r%ud\0" // #16
+ "r%u\0" // #21
+ "xmm%u\0" // #25
+ "ymm%u\0" // #31
+ "zmm%u\0" // #37
+ "rip%u\0" // #43
+ "seg%u\0" // #49
+ "st%u\0" // #55
+ "mm%u\0" // #60
+ "k%u\0" // #65
+ "bnd%u\0" // #69
+ "cr%u\0" // #75
+ "dr%u\0" // #80
+
+ "rip\0" // #85
+ "\0\0\0\0\0\0\0" // #89
+
+ "al\0\0" "cl\0\0" "dl\0\0" "bl\0\0" "spl\0" "bpl\0" "sil\0" "dil\0" // #96
+ "ah\0\0" "ch\0\0" "dh\0\0" "bh\0\0" "n/a\0" "n/a\0" "n/a\0" "n/a\0" // #128
+ "eax\0" "ecx\0" "edx\0" "ebx\0" "esp\0" "ebp\0" "esi\0" "edi\0" // #160
+ "rax\0" "rcx\0" "rdx\0" "rbx\0" "rsp\0" "rbp\0" "rsi\0" "rdi\0" // #192
+ "n/a\0" "es\0\0" "cs\0\0" "ss\0\0" "ds\0\0" "fs\0\0" "gs\0\0" "n/a\0" // #224
+};
+#undef ASMJIT_REG_NAME_ENTRY
+#undef ASMJIT_REG_TYPE_ENTRY
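+
+// How the packed tables above are consumed (illustrative worked example): a
+// Gpd register has specialIndex 160 and each special name occupies 4 bytes,
+// so id 3 resolves to nameStrings + 160 + 3 * 4 = offset 172, which is "ebx";
+// ids >= specialCount fall back to the "r%ud" format string instead.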
+
+static const char* x86GetAddressSizeString(uint32_t size) noexcept {
+ switch (size) {
+ case 1 : return "byte ";
+ case 2 : return "word ";
+ case 4 : return "dword ";
+ case 6 : return "fword ";
+ case 8 : return "qword ";
+ case 10: return "tword ";
+ case 16: return "oword ";
+ case 32: return "yword ";
+ case 64: return "zword ";
+ default: return "";
+ }
+}
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal - Format Operand]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error LoggingInternal::formatOperand(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const Operand_& op) noexcept {
+
+ if (op.isReg())
+ return formatRegister(sb, flags, emitter, archId, op.as<BaseReg>().type(), op.as<BaseReg>().id());
+
+ if (op.isMem()) {
+ const Mem& m = op.as<Mem>();
+ ASMJIT_PROPAGATE(sb.appendString(x86GetAddressSizeString(m.size())));
+
+ // Segment override prefix.
+ uint32_t seg = m.segmentId();
+ if (seg != SReg::kIdNone && seg < SReg::kIdCount)
+ ASMJIT_PROPAGATE(sb.appendFormat("%s:", x86RegFormatInfo.nameStrings + 224 + seg * 4));
+
+ ASMJIT_PROPAGATE(sb.appendChar('['));
+ switch (m.addrType()) {
+ case BaseMem::kAddrTypeAbs: ASMJIT_PROPAGATE(sb.appendString("abs ")); break;
+ case BaseMem::kAddrTypeRel: ASMJIT_PROPAGATE(sb.appendString("rel ")); break;
+ }
+
+ char opSign = '\0';
+ if (m.hasBase()) {
+ opSign = '+';
+ if (m.hasBaseLabel()) {
+ ASMJIT_PROPAGATE(Logging::formatLabel(sb, flags, emitter, m.baseId()));
+ }
+ else {
+ uint32_t modifiedFlags = flags;
+ if (m.isRegHome()) {
+ ASMJIT_PROPAGATE(sb.appendString("&"));
+ modifiedFlags &= ~FormatOptions::kFlagRegCasts;
+ }
+ ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, archId, m.baseType(), m.baseId()));
+ }
+ }
+
+ if (m.hasIndex()) {
+ if (opSign)
+ ASMJIT_PROPAGATE(sb.appendChar(opSign));
+
+ opSign = '+';
+ ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, archId, m.indexType(), m.indexId()));
+ if (m.hasShift())
+ ASMJIT_PROPAGATE(sb.appendFormat("*%u", 1 << m.shift()));
+ }
+
+ uint64_t off = uint64_t(m.offset());
+ if (off || !m.hasBaseOrIndex()) {
+ if (int64_t(off) < 0) {
+ opSign = '-';
+ off = ~off + 1;
+ }
+
+ if (opSign)
+ ASMJIT_PROPAGATE(sb.appendChar(opSign));
+
+ uint32_t base = 10;
+ if ((flags & FormatOptions::kFlagHexOffsets) != 0 && off > 9) {
+ ASMJIT_PROPAGATE(sb.appendString("0x", 2));
+ base = 16;
+ }
+
+ ASMJIT_PROPAGATE(sb.appendUInt(off, base));
+ }
+
+ return sb.appendChar(']');
+ }
+
+ if (op.isImm()) {
+ const Imm& i = op.as<Imm>();
+ int64_t val = i.i64();
+
+ if ((flags & FormatOptions::kFlagHexImms) != 0 && uint64_t(val) > 9) {
+ ASMJIT_PROPAGATE(sb.appendString("0x", 2));
+ return sb.appendUInt(uint64_t(val), 16);
+ }
+ else {
+ return sb.appendInt(val, 10);
+ }
+ }
+
+ if (op.isLabel()) {
+ return Logging::formatLabel(sb, flags, emitter, op.id());
+ }
+
+ return sb.appendString("<None>");
+}
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal - Format Immediate (Extension)]
+// ============================================================================
+
+static constexpr char kImmCharStart = '{';
+static constexpr char kImmCharEnd = '}';
+static constexpr char kImmCharOr = '|';
+
+struct ImmBits {
+ enum Mode : uint32_t {
+ kModeLookup = 0,
+ kModeFormat = 1
+ };
+
+ uint8_t mask;
+ uint8_t shift;
+ uint8_t mode;
+ char text[48 - 3];
+};
+
+ASMJIT_FAVOR_SIZE static Error LoggingInternal_formatImmShuf(String& sb, uint32_t u8, uint32_t bits, uint32_t count) noexcept {
+ uint32_t mask = (1 << bits) - 1;
+
+ for (uint32_t i = 0; i < count; i++, u8 >>= bits) {
+ uint32_t value = u8 & mask;
+ ASMJIT_PROPAGATE(sb.appendChar(i == 0 ? kImmCharStart : kImmCharOr));
+ ASMJIT_PROPAGATE(sb.appendUInt(value));
+ }
+
+ if (kImmCharEnd)
+ ASMJIT_PROPAGATE(sb.appendChar(kImmCharEnd));
+
+ return kErrorOk;
+}
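+
+// For example (illustrative): `pshufd` with an immediate of 0x1B is split
+// into four 2-bit fields, so LoggingInternal_formatImmShuf(sb, 0x1B, 2, 4)
+// appends "{3|2|1|0}" to the output string.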
+
+ASMJIT_FAVOR_SIZE static Error LoggingInternal_formatImmBits(String& sb, uint32_t u8, const ImmBits* bits, uint32_t count) noexcept {
+ uint32_t n = 0;
+ char buf[64];
+
+ for (uint32_t i = 0; i < count; i++) {
+ const ImmBits& spec = bits[i];
+
+ uint32_t value = (u8 & uint32_t(spec.mask)) >> spec.shift;
+ const char* str = nullptr;
+
+ switch (spec.mode) {
+ case ImmBits::kModeLookup:
+ str = Support::findPackedString(spec.text, value);
+ break;
+
+ case ImmBits::kModeFormat:
+ snprintf(buf, sizeof(buf), spec.text, unsigned(value));
+ str = buf;
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ if (!str[0])
+ continue;
+
+ ASMJIT_PROPAGATE(sb.appendChar(++n == 1 ? kImmCharStart : kImmCharOr));
+ ASMJIT_PROPAGATE(sb.appendString(str));
+ }
+
+ if (n && kImmCharEnd)
+ ASMJIT_PROPAGATE(sb.appendChar(kImmCharEnd));
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE static Error LoggingInternal_formatImmText(String& sb, uint32_t u8, uint32_t bits, uint32_t advance, const char* text, uint32_t count = 1) noexcept {
+ uint32_t mask = (1u << bits) - 1;
+ uint32_t pos = 0;
+
+ for (uint32_t i = 0; i < count; i++, u8 >>= bits, pos += advance) {
+ uint32_t value = (u8 & mask) + pos;
+ ASMJIT_PROPAGATE(sb.appendChar(i == 0 ? kImmCharStart : kImmCharOr));
+ ASMJIT_PROPAGATE(sb.appendString(Support::findPackedString(text, value)));
+ }
+
+ if (kImmCharEnd)
+ ASMJIT_PROPAGATE(sb.appendChar(kImmCharEnd));
+
+ return kErrorOk;
+}
+
+ASMJIT_FAVOR_SIZE static Error LoggingInternal_explainConst(
+ String& sb,
+ uint32_t flags,
+ uint32_t instId,
+ uint32_t vecSize,
+ const Imm& imm) noexcept {
+
+ DebugUtils::unused(flags);
+
+ static const char vcmpx[] =
+ "EQ_OQ\0" "LT_OS\0" "LE_OS\0" "UNORD_Q\0" "NEQ_UQ\0" "NLT_US\0" "NLE_US\0" "ORD_Q\0"
+ "EQ_UQ\0" "NGE_US\0" "NGT_US\0" "FALSE_OQ\0" "NEQ_OQ\0" "GE_OS\0" "GT_OS\0" "TRUE_UQ\0"
+ "EQ_OS\0" "LT_OQ\0" "LE_OQ\0" "UNORD_S\0" "NEQ_US\0" "NLT_UQ\0" "NLE_UQ\0" "ORD_S\0"
+ "EQ_US\0" "NGE_UQ\0" "NGT_UQ\0" "FALSE_OS\0" "NEQ_OS\0" "GE_OQ\0" "GT_OQ\0" "TRUE_US\0";
+
+ // Why make it compatible...
+ static const char vpcmpx[] = "EQ\0" "LT\0" "LE\0" "FALSE\0" "NEQ\0" "GE\0" "GT\0" "TRUE\0";
+ static const char vpcomx[] = "LT\0" "LE\0" "GT\0" "GE\0" "EQ\0" "NEQ\0" "FALSE\0" "TRUE\0";
+
+ static const char vshufpd[] = "A0\0A1\0B0\0B1\0A2\0A3\0B2\0B3\0A4\0A5\0B4\0B5\0A6\0A7\0B6\0B7\0";
+ static const char vshufps[] = "A0\0A1\0A2\0A3\0A0\0A1\0A2\0A3\0B0\0B1\0B2\0B3\0B0\0B1\0B2\0B3\0";
+
+ static const ImmBits vfpclassxx[] = {
+ { 0x07u, 0, ImmBits::kModeLookup, "QNAN\0" "+0\0" "-0\0" "+INF\0" "-INF\0" "DENORMAL\0" "-FINITE\0" "SNAN\0" }
+ };
+
+ static const ImmBits vfixupimmxx[] = {
+ { 0x01u, 0, ImmBits::kModeLookup, "\0" "+INF_IE\0" },
+ { 0x02u, 1, ImmBits::kModeLookup, "\0" "-VE_IE\0" },
+ { 0x04u, 2, ImmBits::kModeLookup, "\0" "-INF_IE\0" },
+ { 0x08u, 3, ImmBits::kModeLookup, "\0" "SNAN_IE\0" },
+ { 0x10u, 4, ImmBits::kModeLookup, "\0" "ONE_IE\0" },
+ { 0x20u, 5, ImmBits::kModeLookup, "\0" "ONE_ZE\0" },
+ { 0x40u, 6, ImmBits::kModeLookup, "\0" "ZERO_IE\0" },
+ { 0x80u, 7, ImmBits::kModeLookup, "\0" "ZERO_ZE\0" }
+ };
+
+ static const ImmBits vgetmantxx[] = {
+ { 0x03u, 0, ImmBits::kModeLookup, "[1, 2)\0" "[.5, 2)\0" "[.5, 1)\0" "[.75, 1.5)\0" },
+ { 0x04u, 2, ImmBits::kModeLookup, "\0" "NO_SIGN\0" },
+ { 0x08u, 3, ImmBits::kModeLookup, "\0" "QNAN_IF_SIGN\0" }
+ };
+
+ static const ImmBits vmpsadbw[] = {
+ { 0x04u, 2, ImmBits::kModeLookup, "BLK1[0]\0" "BLK1[1]\0" },
+ { 0x03u, 0, ImmBits::kModeLookup, "BLK2[0]\0" "BLK2[1]\0" "BLK2[2]\0" "BLK2[3]\0" },
+ { 0x40u, 6, ImmBits::kModeLookup, "BLK1[4]\0" "BLK1[5]\0" },
+ { 0x30u, 4, ImmBits::kModeLookup, "BLK2[4]\0" "BLK2[5]\0" "BLK2[6]\0" "BLK2[7]\0" }
+ };
+
+ static const ImmBits vpclmulqdq[] = {
+ { 0x01u, 0, ImmBits::kModeLookup, "LQ\0" "HQ\0" },
+ { 0x10u, 4, ImmBits::kModeLookup, "LQ\0" "HQ\0" }
+ };
+
+ static const ImmBits vperm2x128[] = {
+ { 0x0Bu, 0, ImmBits::kModeLookup, "A0\0" "A1\0" "B0\0" "B1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" },
+ { 0xB0u, 4, ImmBits::kModeLookup, "A0\0" "A1\0" "B0\0" "B1\0" "\0" "\0" "\0" "\0" "0\0" "0\0" "0\0" "0\0" }
+ };
+
+ static const ImmBits vrangexx[] = {
+ { 0x03u, 0, ImmBits::kModeLookup, "MIN\0" "MAX\0" "MIN_ABS\0" "MAX_ABS\0" },
+ { 0x0Cu, 2, ImmBits::kModeLookup, "SIGN_A\0" "SIGN_B\0" "SIGN_0\0" "SIGN_1\0" }
+ };
+
+ static const ImmBits vreducexx_vrndscalexx[] = {
+ { 0x07u, 0, ImmBits::kModeLookup, "\0" "\0" "\0" "\0" "ROUND\0" "FLOOR\0" "CEIL\0" "TRUNC\0" },
+ { 0x08u, 3, ImmBits::kModeLookup, "\0" "SAE\0" },
+ { 0xF0u, 4, ImmBits::kModeFormat, "LEN=%d" }
+ };
+
+ static const ImmBits vroundxx[] = {
+ { 0x07u, 0, ImmBits::kModeLookup, "ROUND\0" "FLOOR\0" "CEIL\0" "TRUNC\0" "\0" "\0" "\0" "\0" },
+ { 0x08u, 3, ImmBits::kModeLookup, "\0" "INEXACT\0" }
+ };
+
+ uint32_t u8 = imm.u8();
+ switch (instId) {
+ case Inst::kIdVblendpd:
+ case Inst::kIdBlendpd:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, vecSize / 8);
+
+ case Inst::kIdVblendps:
+ case Inst::kIdBlendps:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, vecSize / 4);
+
+ case Inst::kIdVcmppd:
+ case Inst::kIdVcmpps:
+ case Inst::kIdVcmpsd:
+ case Inst::kIdVcmpss:
+ return LoggingInternal_formatImmText(sb, u8, 5, 0, vcmpx);
+
+ case Inst::kIdCmppd:
+ case Inst::kIdCmpps:
+ case Inst::kIdCmpsd:
+ case Inst::kIdCmpss:
+ return LoggingInternal_formatImmText(sb, u8, 3, 0, vcmpx);
+
+ case Inst::kIdVdbpsadbw:
+ return LoggingInternal_formatImmShuf(sb, u8, 2, 4);
+
+ case Inst::kIdVdppd:
+ case Inst::kIdVdpps:
+ case Inst::kIdDppd:
+ case Inst::kIdDpps:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, 8);
+
+ case Inst::kIdVmpsadbw:
+ case Inst::kIdMpsadbw:
+ return LoggingInternal_formatImmBits(sb, u8, vmpsadbw, Support::min<uint32_t>(vecSize / 8, 4));
+
+ case Inst::kIdVpblendw:
+ case Inst::kIdPblendw:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, 8);
+
+ case Inst::kIdVpblendd:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, Support::min<uint32_t>(vecSize / 4, 8));
+
+ case Inst::kIdVpclmulqdq:
+ case Inst::kIdPclmulqdq:
+ return LoggingInternal_formatImmBits(sb, u8, vpclmulqdq, ASMJIT_ARRAY_SIZE(vpclmulqdq));
+
+ case Inst::kIdVroundpd:
+ case Inst::kIdVroundps:
+ case Inst::kIdVroundsd:
+ case Inst::kIdVroundss:
+ case Inst::kIdRoundpd:
+ case Inst::kIdRoundps:
+ case Inst::kIdRoundsd:
+ case Inst::kIdRoundss:
+ return LoggingInternal_formatImmBits(sb, u8, vroundxx, ASMJIT_ARRAY_SIZE(vroundxx));
+
+ case Inst::kIdVshufpd:
+ case Inst::kIdShufpd:
+ return LoggingInternal_formatImmText(sb, u8, 1, 2, vshufpd, Support::min<uint32_t>(vecSize / 8, 8));
+
+ case Inst::kIdVshufps:
+ case Inst::kIdShufps:
+ return LoggingInternal_formatImmText(sb, u8, 2, 4, vshufps, 4);
+
+ case Inst::kIdVcvtps2ph:
+ return LoggingInternal_formatImmBits(sb, u8, vroundxx, 1);
+
+ case Inst::kIdVperm2f128:
+ case Inst::kIdVperm2i128:
+ return LoggingInternal_formatImmBits(sb, u8, vperm2x128, ASMJIT_ARRAY_SIZE(vperm2x128));
+
+ case Inst::kIdVpermilpd:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, vecSize / 8);
+
+ case Inst::kIdVpermilps:
+ return LoggingInternal_formatImmShuf(sb, u8, 2, 4);
+
+ case Inst::kIdVpshufd:
+ case Inst::kIdPshufd:
+ return LoggingInternal_formatImmShuf(sb, u8, 2, 4);
+
+ case Inst::kIdVpshufhw:
+ case Inst::kIdVpshuflw:
+ case Inst::kIdPshufhw:
+ case Inst::kIdPshuflw:
+ case Inst::kIdPshufw:
+ return LoggingInternal_formatImmShuf(sb, u8, 2, 4);
+
+ case Inst::kIdVfixupimmpd:
+ case Inst::kIdVfixupimmps:
+ case Inst::kIdVfixupimmsd:
+ case Inst::kIdVfixupimmss:
+ return LoggingInternal_formatImmBits(sb, u8, vfixupimmxx, ASMJIT_ARRAY_SIZE(vfixupimmxx));
+
+ case Inst::kIdVfpclasspd:
+ case Inst::kIdVfpclassps:
+ case Inst::kIdVfpclasssd:
+ case Inst::kIdVfpclassss:
+ return LoggingInternal_formatImmBits(sb, u8, vfpclassxx, ASMJIT_ARRAY_SIZE(vfpclassxx));
+
+ case Inst::kIdVgetmantpd:
+ case Inst::kIdVgetmantps:
+ case Inst::kIdVgetmantsd:
+ case Inst::kIdVgetmantss:
+ return LoggingInternal_formatImmBits(sb, u8, vgetmantxx, ASMJIT_ARRAY_SIZE(vgetmantxx));
+
+ case Inst::kIdVpcmpb:
+ case Inst::kIdVpcmpd:
+ case Inst::kIdVpcmpq:
+ case Inst::kIdVpcmpw:
+ case Inst::kIdVpcmpub:
+ case Inst::kIdVpcmpud:
+ case Inst::kIdVpcmpuq:
+ case Inst::kIdVpcmpuw:
+ return LoggingInternal_formatImmText(sb, u8, 3, 0, vpcmpx);
+
+ case Inst::kIdVpcomb:
+ case Inst::kIdVpcomd:
+ case Inst::kIdVpcomq:
+ case Inst::kIdVpcomw:
+ case Inst::kIdVpcomub:
+ case Inst::kIdVpcomud:
+ case Inst::kIdVpcomuq:
+ case Inst::kIdVpcomuw:
+ return LoggingInternal_formatImmText(sb, u8, 3, 0, vpcomx);
+
+ case Inst::kIdVpermq:
+ case Inst::kIdVpermpd:
+ return LoggingInternal_formatImmShuf(sb, u8, 2, 4);
+
+ case Inst::kIdVpternlogd:
+ case Inst::kIdVpternlogq:
+ return LoggingInternal_formatImmShuf(sb, u8, 1, 8);
+
+ case Inst::kIdVrangepd:
+ case Inst::kIdVrangeps:
+ case Inst::kIdVrangesd:
+ case Inst::kIdVrangess:
+ return LoggingInternal_formatImmBits(sb, u8, vrangexx, ASMJIT_ARRAY_SIZE(vrangexx));
+
+ case Inst::kIdVreducepd:
+ case Inst::kIdVreduceps:
+ case Inst::kIdVreducesd:
+ case Inst::kIdVreducess:
+ case Inst::kIdVrndscalepd:
+ case Inst::kIdVrndscaleps:
+ case Inst::kIdVrndscalesd:
+ case Inst::kIdVrndscaless:
+ return LoggingInternal_formatImmBits(sb, u8, vreducexx_vrndscalexx, ASMJIT_ARRAY_SIZE(vreducexx_vrndscalexx));
+
+ case Inst::kIdVshuff32x4:
+ case Inst::kIdVshuff64x2:
+ case Inst::kIdVshufi32x4:
+ case Inst::kIdVshufi64x2: {
+ uint32_t count = Support::max<uint32_t>(vecSize / 16, 2u);
+ uint32_t bits = count <= 2 ? 1u : 2u;
+ return LoggingInternal_formatImmShuf(sb, u8, bits, count);
+ }
+
+ default:
+ return kErrorOk;
+ }
+}
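+
+// For example (illustrative): `vcmpps` with an immediate of 0x02 selects
+// entry 2 of `vcmpx`, so the logger appends "{LE_OS}" after the immediate.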
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal - Format Register]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error LoggingInternal::formatRegister(String& sb, uint32_t flags, const BaseEmitter* emitter, uint32_t archId, uint32_t rType, uint32_t rId) noexcept {
+ DebugUtils::unused(archId);
+ const RegFormatInfo& info = x86RegFormatInfo;
+
+#ifndef ASMJIT_NO_COMPILER
+ if (Operand::isVirtId(rId)) {
+ if (emitter && emitter->emitterType() == BaseEmitter::kTypeCompiler) {
+ const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
+ if (cc->isVirtIdValid(rId)) {
+ VirtReg* vReg = cc->virtRegById(rId);
+ ASMJIT_ASSERT(vReg != nullptr);
+
+ const char* name = vReg->name();
+ if (name && name[0] != '\0')
+ ASMJIT_PROPAGATE(sb.appendString(name));
+ else
+ ASMJIT_PROPAGATE(sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(rId))));
+
+ if (vReg->type() != rType && rType <= BaseReg::kTypeMax && (flags & FormatOptions::kFlagRegCasts) != 0) {
+ const RegFormatInfo::TypeEntry& typeEntry = info.typeEntries[rType];
+ if (typeEntry.index)
+ ASMJIT_PROPAGATE(sb.appendFormat("@%s", info.typeStrings + typeEntry.index));
+ }
+
+ return kErrorOk;
+ }
+ }
+ }
+#else
+ DebugUtils::unused(emitter, flags);
+#endif
+
+ if (ASMJIT_LIKELY(rType <= BaseReg::kTypeMax)) {
+ const RegFormatInfo::NameEntry& nameEntry = info.nameEntries[rType];
+
+ if (rId < nameEntry.specialCount)
+ return sb.appendString(info.nameStrings + nameEntry.specialIndex + rId * 4);
+
+ if (rId < nameEntry.count)
+ return sb.appendFormat(info.nameStrings + nameEntry.formatIndex, unsigned(rId));
+
+ const RegFormatInfo::TypeEntry& typeEntry = info.typeEntries[rType];
+ if (typeEntry.index)
+ return sb.appendFormat("%s@%u", info.typeStrings + typeEntry.index, rId);
+ }
+
+ return sb.appendFormat("Reg?%u@%u", rType, rId);
+}
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal - Format Instruction]
+// ============================================================================
+
+ASMJIT_FAVOR_SIZE Error LoggingInternal::formatInstruction(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept {
+
+ uint32_t instId = inst.id();
+ uint32_t options = inst.options();
+
+ // Format instruction options and instruction mnemonic.
+ if (instId < Inst::_kIdCount) {
+ // SHORT|LONG options.
+ if (options & Inst::kOptionShortForm) ASMJIT_PROPAGATE(sb.appendString("short "));
+ if (options & Inst::kOptionLongForm) ASMJIT_PROPAGATE(sb.appendString("long "));
+
+ // LOCK|XACQUIRE|XRELEASE options.
+ if (options & Inst::kOptionXAcquire) ASMJIT_PROPAGATE(sb.appendString("xacquire "));
+ if (options & Inst::kOptionXRelease) ASMJIT_PROPAGATE(sb.appendString("xrelease "));
+ if (options & Inst::kOptionLock) ASMJIT_PROPAGATE(sb.appendString("lock "));
+
+ // REP|REPNE options.
+ if (options & (Inst::kOptionRep | Inst::kOptionRepne)) {
+ sb.appendString((options & Inst::kOptionRep) ? "rep " : "repnz ");
+ if (inst.hasExtraReg()) {
+ ASMJIT_PROPAGATE(sb.appendString("{"));
+ ASMJIT_PROPAGATE(formatOperand(sb, flags, emitter, archId, inst.extraReg().toReg<BaseReg>()));
+ ASMJIT_PROPAGATE(sb.appendString("} "));
+ }
+ }
+
+ // REX options.
+ if (options & Inst::kOptionRex) {
+ const uint32_t kRXBWMask = Inst::kOptionOpCodeR |
+ Inst::kOptionOpCodeX |
+ Inst::kOptionOpCodeB |
+ Inst::kOptionOpCodeW ;
+ if (options & kRXBWMask) {
+ sb.appendString("rex.");
+ if (options & Inst::kOptionOpCodeR) sb.appendChar('r');
+ if (options & Inst::kOptionOpCodeX) sb.appendChar('x');
+ if (options & Inst::kOptionOpCodeB) sb.appendChar('b');
+ if (options & Inst::kOptionOpCodeW) sb.appendChar('w');
+ sb.appendChar(' ');
+ }
+ else {
+ ASMJIT_PROPAGATE(sb.appendString("rex "));
+ }
+ }
+
+ // VEX|EVEX options.
+ if (options & Inst::kOptionVex3) ASMJIT_PROPAGATE(sb.appendString("vex3 "));
+ if (options & Inst::kOptionEvex) ASMJIT_PROPAGATE(sb.appendString("evex "));
+
+ ASMJIT_PROPAGATE(InstAPI::instIdToString(archId, instId, sb));
+ }
+ else {
+ ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
+ }
+
+ for (uint32_t i = 0; i < opCount; i++) {
+ const Operand_& op = operands[i];
+ if (op.isNone()) break;
+
+ ASMJIT_PROPAGATE(sb.appendString(i == 0 ? " " : ", "));
+ ASMJIT_PROPAGATE(formatOperand(sb, flags, emitter, archId, op));
+
+ if (op.isImm() && (flags & FormatOptions::kFlagExplainImms)) {
+ uint32_t vecSize = 16;
+ for (uint32_t j = 0; j < opCount; j++)
+ if (operands[j].isReg())
+ vecSize = Support::max<uint32_t>(vecSize, operands[j].size());
+ ASMJIT_PROPAGATE(LoggingInternal_explainConst(sb, flags, instId, vecSize, op.as<Imm>()));
+ }
+
+ // Support AVX-512 masking - {k}{z}.
+ if (i == 0) {
+ if (inst.extraReg().group() == Reg::kGroupKReg) {
+ ASMJIT_PROPAGATE(sb.appendString(" {"));
+ ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, archId, inst.extraReg().type(), inst.extraReg().id()));
+ ASMJIT_PROPAGATE(sb.appendChar('}'));
+
+ if (options & Inst::kOptionZMask)
+ ASMJIT_PROPAGATE(sb.appendString("{z}"));
+ }
+ else if (options & Inst::kOptionZMask) {
+ ASMJIT_PROPAGATE(sb.appendString(" {z}"));
+ }
+ }
+
+ // Support AVX-512 broadcast - {1tox}.
+ if (op.isMem() && op.as<Mem>().hasBroadcast()) {
+ ASMJIT_PROPAGATE(sb.appendFormat(" {1to%u}", Support::bitMask(op.as<Mem>().getBroadcast())));
+ }
+ }
+
+ return kErrorOk;
+}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // !ASMJIT_NO_LOGGING
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86logging_p.h b/3rdparty/asmjit/src/asmjit/x86/x86logging_p.h
new file mode 100644
index 00000000000..c0d7d16e3f2
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86logging_p.h
@@ -0,0 +1,72 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86LOGGING_P_H_INCLUDED
+#define ASMJIT_X86_X86LOGGING_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_LOGGING
+
+#include "../core/logging.h"
+#include "../core/string.h"
+#include "../x86/x86globals.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::LoggingInternal]
+// ============================================================================
+
+namespace LoggingInternal {
+ Error formatRegister(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ uint32_t regType,
+ uint32_t regId) noexcept;
+
+ Error formatOperand(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const Operand_& op) noexcept;
+
+ Error formatInstruction(
+ String& sb,
+ uint32_t flags,
+ const BaseEmitter* emitter,
+ uint32_t archId,
+ const BaseInst& inst, const Operand_* operands, uint32_t opCount) noexcept;
+};
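+
+// A possible usage sketch (illustrative; `eax`, `ebx` and `ArchInfo::kIdX64`
+// come from other AsmJit headers, and the exact output text is an assumption):
+//
+//   String sb;
+//   Operand ops[] = { eax, ebx };
+//   LoggingInternal::formatInstruction(
+//     sb, 0, nullptr, ArchInfo::kIdX64, BaseInst(Inst::kIdAdd), ops, 2);
+//   // sb now contains something like "add eax, ebx".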
+
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // !ASMJIT_NO_LOGGING
+#endif // ASMJIT_X86_X86LOGGING_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h b/3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h
new file mode 100644
index 00000000000..69cafe5be7c
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86opcode_p.h
@@ -0,0 +1,452 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86OPCODE_P_H_INCLUDED
+#define ASMJIT_X86_X86OPCODE_P_H_INCLUDED
+
+#include "../core/logging.h"
+#include "../core/string.h"
+#include "../x86/x86globals.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::Opcode]
+// ============================================================================
+
+//! Helper class to store and manipulate X86 opcodes.
+//!
+//! The first 8 least significant bits describe the opcode byte as defined in
+//! ISA manuals; all other bits describe other properties like prefixes, see
+//! `Opcode::Bits` for more information.
+struct Opcode {
+ uint32_t v;
+
+ //! Describes a meaning of all bits of AsmJit's 32-bit opcode value.
+ //!
+ //! This schema is AsmJit specific and has been designed to allow encoding of
+ //! all X86 instructions available. X86, MMX, and SSE+ instructions always use
+ //! `MM` and `PP` fields, which are encoded to corresponding prefixes needed
+ //! by X86 or SIMD instructions. AVX+ instructions embed `MMMMM` and `PP` fields
+ //! in a VEX prefix, and AVX-512 instructions embed `MM` and `PP` in EVEX prefix.
+ //!
+ //! The instruction opcode definition uses 1 or 2 bytes as an opcode value.
+ //! One byte is needed by most instructions, two bytes are only used by
+ //! legacy X87-FPU instructions. This means that the second byte is free to
+ //! be used by instructions encoded by using VEX and/or EVEX prefix.
+ //!
+ //! The fields description:
+ //!
+ //! - `MM` field is used to encode prefixes needed by the instruction or as
+ //! a part of VEX/EVEX prefix. Described as `mm` and `mmmmm` in instruction
+ //! manuals.
+ //!
+ //! NOTE: The `MM` field is defined as `mmmmm` (5 bits), but only the 2
+ //! least significant bits are used by VEX and EVEX prefixes, and an
+ //! additional 4th bit is used by the XOP prefix, so AsmJit uses the 3rd
+ //! and 5th bits for its own purposes. These bits will probably never be
+ //! used in future encodings as AVX512 uses only `000mm` from `mmmmm`.
+ //!
+ //! - `PP` field is used to encode prefixes needed by the instruction or as a
+ //! part of VEX/EVEX prefix. Described as `pp` in instruction manuals.
+ //!
+ //! - `LL` field is used exclusively by AVX+ and AVX512+ instruction sets. It
+ //!   describes vector size, which is `L.128` for XMM register, `L.256` for
+ //!   YMM register, and `L.512` for ZMM register. The `LL` field is omitted
+ //!   when the instruction supports multiple vector lengths, however, if the
+ //!   instruction requires a specific `L` value it must be specified as a
+ //!   part of the opcode.
+ //!
+ //! NOTE: `LL` having value `11` is not defined yet.
+ //!
+ //! - `W` field is the most complicated. It was added by the 64-bit
+ //!   architecture to promote the default operation width (instructions that
+ //!   perform a 32-bit operation by default require the width to be
+ //!   overridden to 64-bit explicitly). There is nothing wrong with this,
+ //!   however, some instructions introduced an implicit `W` override, for
+ //!   example a `cdqe` instruction is basically a `cwde` instruction with an
+ //!   overridden `W` (set to 1). There are some others in the base X86
+ //!   instruction set. More recent instruction sets started using the `W`
+ //!   field more often:
+ //!
+ //! - AVX instructions started using `W` field as an extended opcode for FMA,
+ //! GATHER, PERM, and other instructions. It also uses `W` field to override
+ //! the default operation width in instructions like `vmovq`.
+ //!
+ //!   - AVX-512 instructions started using `W` field as an extended opcode
+ //!     for all new instructions. This wouldn't have been an issue if the `W`
+ //!     field of AVX-512 had matched AVX, but this is not always the case.
+ //!
+ //! - `O` field is an extended opcode field (3 bits) embedded in ModR/M BYTE.
+ //!
+ //! - `CDSHL` and `CDTT` fields describe 'compressed-displacement'. `CDSHL` is
+ //!   defined for each instruction that is AVX-512 encodable (EVEX) and contains
+ //!   a base N shift (the base shift to perform the calculation). The `CDTT`
+ //!   field is derived from the instruction specification and describes an
+ //!   additional shift used to calculate the final `CDSHL` that will be used
+ //!   in the SIB byte.
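+ //!
+ //!   As an illustrative example (an assumption, not quoted from the ISA
+ //!   manuals): a full-vector AVX-512 (EVEX) load of a ZMM register
+ //!   compresses displacements by N=64, so a displacement of 128 can be
+ //!   encoded as a single disp8 byte with value 2 (128 / 64) instead of a
+ //!   full 32-bit displacement.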
+ //!
+ //! \note Don't reorder any fields here, the shifts and masks were defined
+ //! carefully to make encoding of X86 instructions fast, especially to construct
+ //! REX, VEX, and EVEX prefixes in the most efficient way. Changing values defined
+ //! by these enums may cause AsmJit to emit invalid binary representations of
+ //! instructions passed to `x86::Assembler::_emit`.
+ enum Bits : uint32_t {
+ // MM & VEX & EVEX & XOP
+ // ---------------------
+ //
+ // Two meanings:
+ // * Part of a legacy opcode (prefixes emitted before the main opcode byte).
+ // * `MMMMM` field in VEX|EVEX|XOP instruction.
+ //
+ // AVX reserves 5 bits for `MMMMM` field, however AVX instructions only use
+ // 2 bits and XOP 3 bits. AVX-512 shrinks `MMMMM` field into `MM` so it's
+ // safe to assume that bits [4:2] of `MM` field won't be used in future
+ // extensions, which will most probably use EVEX encoding. AsmJit divides
+ // MM field into this layout:
+ //
+ // [1:0] - Used to describe 0F, 0F38 and 0F3A legacy prefix bytes and
+ // 2 bits of MM field.
+ // [2] - Used to force 3-BYTE VEX prefix, but then cleared to zero before
+ // the prefix is emitted. This bit is not used by any instruction
+ // so it can be used for any purpose by AsmJit. Also, this bit is
+ // used as an extension to `MM` field describing 0F|0F38|0F3A to also
+ // describe 0F01 as used by some legacy instructions (instructions
+ // not using VEX/EVEX prefix).
+ // [3] - Required by XOP instructions, so we use this bit also to indicate
+ // that this is a XOP opcode.
+ kMM_Shift = 8,
+ kMM_Mask = 0x1Fu << kMM_Shift,
+ kMM_00 = 0x00u << kMM_Shift,
+ kMM_0F = 0x01u << kMM_Shift,
+ kMM_0F38 = 0x02u << kMM_Shift,
+ kMM_0F3A = 0x03u << kMM_Shift, // Described also as XOP.M3 in AMD manuals.
+ kMM_0F01 = 0x04u << kMM_Shift, // AsmJit way to describe 0F01 (never VEX/EVEX).
+
+ // `XOP` field is only used to force XOP prefix instead of VEX3 prefix. We
+ // know that only XOP encoding uses bit 0b1000 of MM field and that no VEX
+ // and EVEX instruction uses such bit, so we can use this bit to force XOP
+ // prefix to be emitted instead of VEX3 prefix. See `x86VEXPrefix` defined
+ // in `x86assembler.cpp`.
+ kMM_XOP08 = 0x08u << kMM_Shift, // XOP.M8.
+ kMM_XOP09 = 0x09u << kMM_Shift, // XOP.M9.
+ kMM_XOP0A = 0x0Au << kMM_Shift, // XOP.MA.
+
+ kMM_IsXOP_Shift= kMM_Shift + 3,
+ kMM_IsXOP = kMM_XOP08,
+
+ // NOTE: ForceVex3 makes it possible to force emitting VEX3 instead of
+ // VEX2 in some cases (similar to forcing REX prefix). ForceEvex forces
+ // emitting the EVEX prefix instead of VEX2|VEX3. EVEX-only instructions
+ // will always have ForceEvex set; however, instructions that can be
+ // encoded by either VEX or EVEX prefix should not have ForceEvex set.
+
+ kMM_ForceVex3 = 0x04u << kMM_Shift, // Force 3-BYTE VEX prefix.
+ kMM_ForceEvex = 0x10u << kMM_Shift, // Force 4-BYTE EVEX prefix.
+
+ // FPU_2B - Second-Byte of the Opcode used by FPU
+ // ----------------------------------------------
+ //
+ // Second byte opcode. This BYTE is ONLY used by FPU instructions and
+ // collides with 3 bits from `MM` and 5 bits from 'CDSHL' and 'CDTT'.
+ // It's fine as FPU and AVX512 flags are never used at the same time.
+ kFPU_2B_Shift = 10,
+ kFPU_2B_Mask = 0xFF << kFPU_2B_Shift,
+
+ // CDSHL & CDTT
+ // ------------
+ //
+ // Compressed displacement bits.
+ //
+ // Each opcode defines the base size (N) shift:
+ // [0]: BYTE (1 byte).
+ // [1]: WORD (2 bytes).
+ // [2]: DWORD (4 bytes - float/int32).
+ // [3]: QWORD (8 bytes - double/int64).
+ // [4]: OWORD (16 bytes - used by FV|FVM|M128).
+ //
+ // Which is then scaled by the instruction's TT (TupleType) into possible:
+ // [5]: YWORD (32 bytes)
+ // [6]: ZWORD (64 bytes)
+ //
+ // These bits are then adjusted before calling EmitModSib or EmitModVSib.
+ kCDSHL_Shift = 13,
+ kCDSHL_Mask = 0x7u << kCDSHL_Shift,
+
+ kCDSHL__ = 0x0u << kCDSHL_Shift, // Base element size not used.
+ kCDSHL_0 = 0x0u << kCDSHL_Shift, // N << 0.
+ kCDSHL_1 = 0x1u << kCDSHL_Shift, // N << 1.
+ kCDSHL_2 = 0x2u << kCDSHL_Shift, // N << 2.
+ kCDSHL_3 = 0x3u << kCDSHL_Shift, // N << 3.
+ kCDSHL_4 = 0x4u << kCDSHL_Shift, // N << 4.
+ kCDSHL_5 = 0x5u << kCDSHL_Shift, // N << 5.
+
+ // Compressed displacement tuple-type (specific to AsmJit).
+ //
+ // Since we store the base offset independently of CDTT we can simplify the
+ // number of 'TUPLE_TYPE' groups significantly and just handle special cases.
+ kCDTT_Shift = 16,
+ kCDTT_Mask = 0x3u << kCDTT_Shift,
+ kCDTT_None = 0x0u << kCDTT_Shift, // Does nothing.
+ kCDTT_ByLL = 0x1u << kCDTT_Shift, // Scales by LL (1x 2x 4x).
+ kCDTT_T1W = 0x2u << kCDTT_Shift, // Used to add 'W' to the shift.
+ kCDTT_DUP = 0x3u << kCDTT_Shift, // Special 'VMOVDDUP' case.
+
+ // Aliases that match names used in instruction manuals.
+ kCDTT__ = kCDTT_None,
+ kCDTT_FV = kCDTT_ByLL,
+ kCDTT_HV = kCDTT_ByLL,
+ kCDTT_FVM = kCDTT_ByLL,
+ kCDTT_T1S = kCDTT_None,
+ kCDTT_T1F = kCDTT_None,
+ kCDTT_T1_4X = kCDTT_None,
+ kCDTT_T2 = kCDTT_None,
+ kCDTT_T4 = kCDTT_None,
+ kCDTT_T8 = kCDTT_None,
+ kCDTT_HVM = kCDTT_ByLL,
+ kCDTT_QVM = kCDTT_ByLL,
+ kCDTT_OVM = kCDTT_ByLL,
+ kCDTT_128 = kCDTT_None,
+
+ kCDTT_T4X = kCDTT_T1_4X, // Alias to have only 3 letters.
+
+ // `O` Field in ModR/M
+ // -------------------
+
+ kO_Shift = 18,
+ kO_Mask = 0x7u << kO_Shift,
+
+ kO__ = 0x0u,
+ kO_0 = 0x0u << kO_Shift,
+ kO_1 = 0x1u << kO_Shift,
+ kO_2 = 0x2u << kO_Shift,
+ kO_3 = 0x3u << kO_Shift,
+ kO_4 = 0x4u << kO_Shift,
+ kO_5 = 0x5u << kO_Shift,
+ kO_6 = 0x6u << kO_Shift,
+ kO_7 = 0x7u << kO_Shift,
+
+ // `PP` Field
+ // ----------
+ //
+ // These fields are stored deliberately right after each other as it makes
+ // it easier to construct VEX prefix from the opcode value stored in the
+ // instruction database.
+ //
+ // Two meanings:
+ // * "PP" field in AVX/XOP/AVX-512 instruction.
+ // * Mandatory Prefix in legacy encoding.
+ //
+ // AVX reserves 2 bits for `PP` field, but AsmJit extends the storage by 1
+ // more bit that is used to emit 9B prefix for some X87-FPU instructions.
+
+ kPP_Shift = 21,
+ kPP_VEXMask = 0x03u << kPP_Shift, // PP field mask used by VEX/EVEX.
+ kPP_FPUMask = 0x07u << kPP_Shift, // Mask used by EMIT_PP, also includes '0x9B'.
+ kPP_00 = 0x00u << kPP_Shift,
+ kPP_66 = 0x01u << kPP_Shift,
+ kPP_F3 = 0x02u << kPP_Shift,
+ kPP_F2 = 0x03u << kPP_Shift,
+
+ kPP_9B = 0x07u << kPP_Shift, // AsmJit specific to emit FPU's '9B' byte.
+
+ // REX|VEX|EVEX B|X|R|W Bits
+ // -------------------------
+ //
+ // NOTE: REX.[B|X|R] are never stored within the opcode itself, they are
+ // reserved by AsmJit and are added dynamically to the opcode to represent
+ // [REX|VEX|EVEX].[B|X|R] bits. REX.W can be stored in DB as it's sometimes
+ // part of the opcode itself.
+
+ // These must be binary compatible with instruction options.
+ kREX_Shift = 24,
+ kREX_Mask = 0x0Fu << kREX_Shift,
+ kB = 0x01u << kREX_Shift, // Never stored in DB, used by encoder.
+ kX = 0x02u << kREX_Shift, // Never stored in DB, used by encoder.
+ kR = 0x04u << kREX_Shift, // Never stored in DB, used by encoder.
+ kW = 0x08u << kREX_Shift,
+ kW_Shift = kREX_Shift + 3,
+
+ kW__ = 0u << kW_Shift, // REX.W/VEX.W is unspecified.
+ kW_x = 0u << kW_Shift, // REX.W/VEX.W is based on instruction operands.
+ kW_I = 0u << kW_Shift, // REX.W/VEX.W is ignored (WIG).
+ kW_0 = 0u << kW_Shift, // REX.W/VEX.W is 0 (W0).
+ kW_1 = 1u << kW_Shift, // REX.W/VEX.W is 1 (W1).
+
+ // EVEX.W Field
+ // ------------
+ //
+ // `W` field used by EVEX instruction encoding.
+
+ kEvex_W_Shift = 28,
+ kEvex_W_Mask = 1u << kEvex_W_Shift,
+
+ kEvex_W__ = 0u << kEvex_W_Shift, // EVEX.W is unspecified (not EVEX instruction).
+ kEvex_W_x = 0u << kEvex_W_Shift, // EVEX.W is based on instruction operands.
+ kEvex_W_I = 0u << kEvex_W_Shift, // EVEX.W is ignored (WIG).
+ kEvex_W_0 = 0u << kEvex_W_Shift, // EVEX.W is 0 (W0).
+ kEvex_W_1 = 1u << kEvex_W_Shift, // EVEX.W is 1 (W1).
+
+ // `L` or `LL` field in AVX/XOP/AVX-512
+ // ------------------------------------
+ //
+ // VEX/XOP prefix can only use the first bit `L.128` or `L.256`. The EVEX
+ // prefix makes it possible to also use `L.512`.
+ //
+ // If the instruction set manual describes an instruction by `LIG` it means
+ // that the `L` field is ignored and AsmJit defaults to `0` in such case.
+ kLL_Shift = 29,
+ kLL_Mask = 0x3u << kLL_Shift,
+
+ kLL__ = 0x0u << kLL_Shift, // LL is unspecified.
+ kLL_x = 0x0u << kLL_Shift, // LL is based on instruction operands.
+ kLL_I = 0x0u << kLL_Shift, // LL is ignored (LIG).
+ kLL_0 = 0x0u << kLL_Shift, // LL is 0 (L.128).
+ kLL_1 = 0x1u << kLL_Shift, // LL is 1 (L.256).
+ kLL_2 = 0x2u << kLL_Shift, // LL is 2 (L.512).
+
+ // Opcode Combinations
+ // -------------------
+
+ k0 = 0, // '__' (no prefix, used internally).
+ k000000 = kPP_00 | kMM_00, // '__' (no prefix, to be the same width as others).
+ k000F00 = kPP_00 | kMM_0F, // '0F'
+ k000F01 = kPP_00 | kMM_0F01, // '0F01'
+ k000F0F = kPP_00 | kMM_0F, // '0F0F' - 3DNOW, equal to 0x0F, must have special encoding to take effect.
+ k000F38 = kPP_00 | kMM_0F38, // '0F38'
+ k000F3A = kPP_00 | kMM_0F3A, // '0F3A'
+ k660000 = kPP_66 | kMM_00, // '66'
+ k660F00 = kPP_66 | kMM_0F, // '660F'
+ k660F38 = kPP_66 | kMM_0F38, // '660F38'
+ k660F3A = kPP_66 | kMM_0F3A, // '660F3A'
+ kF20000 = kPP_F2 | kMM_00, // 'F2'
+ kF20F00 = kPP_F2 | kMM_0F, // 'F20F'
+ kF20F38 = kPP_F2 | kMM_0F38, // 'F20F38'
+ kF20F3A = kPP_F2 | kMM_0F3A, // 'F20F3A'
+ kF30000 = kPP_F3 | kMM_00, // 'F3'
+ kF30F00 = kPP_F3 | kMM_0F, // 'F30F'
+ kF30F38 = kPP_F3 | kMM_0F38, // 'F30F38'
+ kF30F3A = kPP_F3 | kMM_0F3A, // 'F30F3A'
+ kFPU_00 = kPP_00 | kMM_00, // '__' (FPU)
+ kFPU_9B = kPP_9B | kMM_00, // '9B' (FPU)
+ kXOP_M8 = kPP_00 | kMM_XOP08, // 'M8' (XOP)
+ kXOP_M9 = kPP_00 | kMM_XOP09, // 'M9' (XOP)
+ kXOP_MA = kPP_00 | kMM_XOP0A // 'MA' (XOP)
+ };
+
+ // --------------------------------------------------------------------------
+ // [Opcode Builder]
+ // --------------------------------------------------------------------------
+
+ ASMJIT_INLINE uint32_t get() const noexcept { return v; }
+
+ ASMJIT_INLINE bool hasW() const noexcept { return (v & kW) != 0; }
+ ASMJIT_INLINE bool has66h() const noexcept { return (v & kPP_66) != 0; }
+
+ ASMJIT_INLINE Opcode& add(uint32_t x) noexcept { return operator+=(x); }
+
+ ASMJIT_INLINE Opcode& add66h() noexcept { return operator|=(kPP_66); }
+ template<typename T>
+ ASMJIT_INLINE Opcode& add66hIf(T exp) noexcept { return operator|=(uint32_t(exp) << kPP_Shift); }
+ template<typename T>
+ ASMJIT_INLINE Opcode& add66hBySize(T size) noexcept { return add66hIf(size == 2); }
+
+ ASMJIT_INLINE Opcode& addW() noexcept { return operator|=(kW); }
+ template<typename T>
+ ASMJIT_INLINE Opcode& addWIf(T exp) noexcept { return operator|=(uint32_t(exp) << kW_Shift); }
+ template<typename T>
+ ASMJIT_INLINE Opcode& addWBySize(T size) noexcept { return addWIf(size == 8); }
+
+ template<typename T>
+ ASMJIT_INLINE Opcode& addPrefixBySize(T size) noexcept {
+ static const uint32_t mask[16] = {
+ 0, // #0
+ 0, // #1 -> nothing (already handled or not possible)
+ kPP_66, // #2 -> 66H
+ 0, // #3
+ 0, // #4 -> nothing
+ 0, // #5
+ 0, // #6
+ 0, // #7
+ kW // #8 -> REX.W
+ };
+ return operator|=(mask[size & 0xF]);
+ }
+
+ template<typename T>
+ ASMJIT_INLINE Opcode& addArithBySize(T size) noexcept {
+ static const uint32_t mask[16] = {
+ 0, // #0
+ 0, // #1 -> nothing
+ 1 | kPP_66, // #2 -> NOT_BYTE_OP(1) and 66H
+ 0, // #3
+ 1, // #4 -> NOT_BYTE_OP(1)
+ 0, // #5
+ 0, // #6
+ 0, // #7
+ 1 | kW // #8 -> NOT_BYTE_OP(1) and REX.W
+ };
+ return operator|=(mask[size & 0xF]);
+ }
+
+ //! Extract `O` field from the opcode.
+ ASMJIT_INLINE uint32_t extractO() const noexcept {
+ return (v >> kO_Shift) & 0x07;
+ }
+
+ //! Extract `REX` prefix from opcode combined with `options`.
+ ASMJIT_INLINE uint32_t extractRex(uint32_t options) const noexcept {
+ // kREX was designed in a way that when shifted there will be no bytes
+ // set except REX.[B|X|R|W]. The returned value forms a real REX prefix byte.
+ // This case should be unit-tested as well.
+ return (v | options) >> kREX_Shift;
+ }
+
+ ASMJIT_INLINE uint32_t extractLLMM(uint32_t options) const noexcept {
+ uint32_t x = v & (kLL_Mask | kMM_Mask);
+ uint32_t y = options & (Inst::kOptionVex3 | Inst::kOptionEvex);
+ return (x | y) >> kMM_Shift;
+ }
+
+ ASMJIT_INLINE Opcode& operator=(uint32_t x) noexcept { v = x; return *this; }
+ ASMJIT_INLINE Opcode& operator+=(uint32_t x) noexcept { v += x; return *this; }
+ ASMJIT_INLINE Opcode& operator-=(uint32_t x) noexcept { v -= x; return *this; }
+ ASMJIT_INLINE Opcode& operator&=(uint32_t x) noexcept { v &= x; return *this; }
+ ASMJIT_INLINE Opcode& operator|=(uint32_t x) noexcept { v |= x; return *this; }
+ ASMJIT_INLINE Opcode& operator^=(uint32_t x) noexcept { v ^= x; return *this; }
+
+ ASMJIT_INLINE uint32_t operator&(uint32_t x) const noexcept { return v & x; }
+ ASMJIT_INLINE uint32_t operator|(uint32_t x) const noexcept { return v | x; }
+ ASMJIT_INLINE uint32_t operator^(uint32_t x) const noexcept { return v ^ x; }
+ ASMJIT_INLINE uint32_t operator<<(uint32_t x) const noexcept { return v << x; }
+ ASMJIT_INLINE uint32_t operator>>(uint32_t x) const noexcept { return v >> x; }
+};
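+
+// A small illustrative sketch (not used by the encoder itself) showing how
+// the fields above compose; assumes the ALU group opcode 0x80 with a '/5'
+// (SUB) ModR/M extension and 64-bit operands:
+//
+//   Opcode op;
+//   op = Opcode::k000000 | 0x80; // No prefixes, base opcode byte 0x80.
+//   op |= Opcode::kO_5;          // '/5' stored in the `O` field.
+//   op.addArithBySize(8);        // 8-byte op: 0x80 -> 0x81, adds REX.W.
+//
+//   uint32_t o = op.extractO(); // 5
+//   bool w = op.hasW();         // true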
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_X86_X86OPCODE_P_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86operand.cpp b/3rdparty/asmjit/src/asmjit/x86/x86operand.cpp
new file mode 100644
index 00000000000..ca7ce5a8c66
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86operand.cpp
@@ -0,0 +1,271 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#ifdef ASMJIT_BUILD_X86
+
+#include "../core/misc_p.h"
+#include "../x86/x86operand.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::OpData]
+// ============================================================================
+
+const OpData opData = {
+ {
+ // RegInfo[]
+ #define VALUE(X) { RegTraits<X>::kSignature }
+ { ASMJIT_LOOKUP_TABLE_32(VALUE, 0) },
+ #undef VALUE
+
+ // RegCount[]
+ #define VALUE(X) RegTraits<X>::kCount
+ { ASMJIT_LOOKUP_TABLE_32(VALUE, 0) },
+ #undef VALUE
+
+ // RegTypeToTypeId[]
+ #define VALUE(X) RegTraits<X>::kTypeId
+ { ASMJIT_LOOKUP_TABLE_32(VALUE, 0) }
+ #undef VALUE
+ }
+};
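+
+// Note (illustrative, not upstream): ASMJIT_LOOKUP_TABLE_32 from misc_p.h
+// expands to VALUE(0), VALUE(1), ..., VALUE(31), so each table above gets
+// exactly one entry per possible register type.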
+
+// ============================================================================
+// [asmjit::x86::Operand - Unit]
+// ============================================================================
+
+#if defined(ASMJIT_TEST)
+UNIT(x86_operand) {
+ Label L(1000); // Label with some ID.
+
+ INFO("Checking basic properties of built-in X86 registers");
+ EXPECT(gpb(Gp::kIdAx) == al);
+ EXPECT(gpb(Gp::kIdBx) == bl);
+ EXPECT(gpb(Gp::kIdCx) == cl);
+ EXPECT(gpb(Gp::kIdDx) == dl);
+
+ EXPECT(gpb_lo(Gp::kIdAx) == al);
+ EXPECT(gpb_lo(Gp::kIdBx) == bl);
+ EXPECT(gpb_lo(Gp::kIdCx) == cl);
+ EXPECT(gpb_lo(Gp::kIdDx) == dl);
+
+ EXPECT(gpb_hi(Gp::kIdAx) == ah);
+ EXPECT(gpb_hi(Gp::kIdBx) == bh);
+ EXPECT(gpb_hi(Gp::kIdCx) == ch);
+ EXPECT(gpb_hi(Gp::kIdDx) == dh);
+
+ EXPECT(gpw(Gp::kIdAx) == ax);
+ EXPECT(gpw(Gp::kIdBx) == bx);
+ EXPECT(gpw(Gp::kIdCx) == cx);
+ EXPECT(gpw(Gp::kIdDx) == dx);
+
+ EXPECT(gpd(Gp::kIdAx) == eax);
+ EXPECT(gpd(Gp::kIdBx) == ebx);
+ EXPECT(gpd(Gp::kIdCx) == ecx);
+ EXPECT(gpd(Gp::kIdDx) == edx);
+
+ EXPECT(gpq(Gp::kIdAx) == rax);
+ EXPECT(gpq(Gp::kIdBx) == rbx);
+ EXPECT(gpq(Gp::kIdCx) == rcx);
+ EXPECT(gpq(Gp::kIdDx) == rdx);
+
+ EXPECT(gpb(Gp::kIdAx) != dl);
+ EXPECT(gpw(Gp::kIdBx) != cx);
+ EXPECT(gpd(Gp::kIdCx) != ebx);
+ EXPECT(gpq(Gp::kIdDx) != rax);
+
+ INFO("Checking if x86::reg(...) matches built-in IDs");
+ EXPECT(gpb(5) == bpl);
+ EXPECT(gpw(5) == bp);
+ EXPECT(gpd(5) == ebp);
+ EXPECT(gpq(5) == rbp);
+ EXPECT(st(5) == st5);
+ EXPECT(mm(5) == mm5);
+ EXPECT(k(5) == k5);
+ EXPECT(cr(5) == cr5);
+ EXPECT(dr(5) == dr5);
+ EXPECT(xmm(5) == xmm5);
+ EXPECT(ymm(5) == ymm5);
+ EXPECT(zmm(5) == zmm5);
+
+ INFO("Checking x86::Gp register properties");
+ EXPECT(Gp().isReg() == true);
+ EXPECT(eax.isReg() == true);
+ EXPECT(eax.id() == 0);
+ EXPECT(eax.size() == 4);
+ EXPECT(eax.type() == Reg::kTypeGpd);
+ EXPECT(eax.group() == Reg::kGroupGp);
+
+ INFO("Checking x86::Xmm register properties");
+ EXPECT(Xmm().isReg() == true);
+ EXPECT(xmm4.isReg() == true);
+ EXPECT(xmm4.id() == 4);
+ EXPECT(xmm4.size() == 16);
+ EXPECT(xmm4.type() == Reg::kTypeXmm);
+ EXPECT(xmm4.group() == Reg::kGroupVec);
+ EXPECT(xmm4.isVec());
+
+ INFO("Checking x86::Ymm register properties");
+ EXPECT(Ymm().isReg() == true);
+ EXPECT(ymm5.isReg() == true);
+ EXPECT(ymm5.id() == 5);
+ EXPECT(ymm5.size() == 32);
+ EXPECT(ymm5.type() == Reg::kTypeYmm);
+ EXPECT(ymm5.group() == Reg::kGroupVec);
+ EXPECT(ymm5.isVec());
+
+ INFO("Checking x86::Zmm register properties");
+ EXPECT(Zmm().isReg() == true);
+ EXPECT(zmm6.isReg() == true);
+ EXPECT(zmm6.id() == 6);
+ EXPECT(zmm6.size() == 64);
+ EXPECT(zmm6.type() == Reg::kTypeZmm);
+ EXPECT(zmm6.group() == Reg::kGroupVec);
+ EXPECT(zmm6.isVec());
+
+ INFO("Checking x86::Vec register properties");
+ EXPECT(Vec().isReg() == true);
+  // Converts a VEC register to the type of the passed register, but keeps the ID.
+ EXPECT(xmm4.cloneAs(ymm10) == ymm4);
+ EXPECT(xmm4.cloneAs(zmm11) == zmm4);
+ EXPECT(ymm5.cloneAs(xmm12) == xmm5);
+ EXPECT(ymm5.cloneAs(zmm13) == zmm5);
+ EXPECT(zmm6.cloneAs(xmm14) == xmm6);
+ EXPECT(zmm6.cloneAs(ymm15) == ymm6);
+
+ EXPECT(xmm7.xmm() == xmm7);
+ EXPECT(xmm7.ymm() == ymm7);
+ EXPECT(xmm7.zmm() == zmm7);
+
+ EXPECT(ymm7.xmm() == xmm7);
+ EXPECT(ymm7.ymm() == ymm7);
+ EXPECT(ymm7.zmm() == zmm7);
+
+ EXPECT(zmm7.xmm() == xmm7);
+ EXPECT(zmm7.ymm() == ymm7);
+ EXPECT(zmm7.zmm() == zmm7);
+
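+  // Added checks (editor's example): `half()` as defined by x86::Vec and its
+  // subclasses in this patch steps one vector width down, clamping at XMM.
+  EXPECT(xmm7.half() == xmm7);
+  EXPECT(ymm7.half() == xmm7);
+  EXPECT(zmm7.half() == ymm7);
+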
+ INFO("Checking x86::FpMm register properties");
+ EXPECT(Mm().isReg() == true);
+ EXPECT(mm2.isReg() == true);
+ EXPECT(mm2.id() == 2);
+ EXPECT(mm2.size() == 8);
+ EXPECT(mm2.type() == Reg::kTypeMm);
+ EXPECT(mm2.group() == Reg::kGroupMm);
+
+ INFO("Checking x86::KReg register properties");
+ EXPECT(KReg().isReg() == true);
+ EXPECT(k3.isReg() == true);
+ EXPECT(k3.id() == 3);
+ EXPECT(k3.size() == 0);
+ EXPECT(k3.type() == Reg::kTypeKReg);
+ EXPECT(k3.group() == Reg::kGroupKReg);
+
+ INFO("Checking x86::St register properties");
+ EXPECT(St().isReg() == true);
+ EXPECT(st1.isReg() == true);
+ EXPECT(st1.id() == 1);
+ EXPECT(st1.size() == 10);
+ EXPECT(st1.type() == Reg::kTypeSt);
+ EXPECT(st1.group() == Reg::kGroupSt);
+
+ INFO("Checking if default constructed regs behave as expected");
+ EXPECT(Reg().isValid() == false);
+ EXPECT(Gp().isValid() == false);
+ EXPECT(Xmm().isValid() == false);
+ EXPECT(Ymm().isValid() == false);
+ EXPECT(Zmm().isValid() == false);
+ EXPECT(Mm().isValid() == false);
+ EXPECT(KReg().isValid() == false);
+ EXPECT(SReg().isValid() == false);
+ EXPECT(CReg().isValid() == false);
+ EXPECT(DReg().isValid() == false);
+ EXPECT(St().isValid() == false);
+ EXPECT(Bnd().isValid() == false);
+
+ INFO("Checking x86::Mem operand");
+ Mem m;
+ EXPECT(m == Mem(), "Two default constructed x86::Mem operands must be equal");
+
+ m = ptr(L);
+ EXPECT(m.hasBase() == true);
+ EXPECT(m.hasBaseReg() == false);
+ EXPECT(m.hasBaseLabel() == true);
+ EXPECT(m.hasOffset() == false);
+ EXPECT(m.isOffset64Bit() == false);
+ EXPECT(m.offset() == 0);
+ EXPECT(m.offsetLo32() == 0);
+
+ m = ptr(0x0123456789ABCDEFu);
+ EXPECT(m.hasBase() == false);
+ EXPECT(m.hasBaseReg() == false);
+ EXPECT(m.hasIndex() == false);
+ EXPECT(m.hasIndexReg() == false);
+ EXPECT(m.hasOffset() == true);
+ EXPECT(m.isOffset64Bit() == true);
+ EXPECT(m.offset() == int64_t(0x0123456789ABCDEFu));
+ EXPECT(m.offsetLo32() == int32_t(0x89ABCDEFu));
+ m.addOffset(1);
+ EXPECT(m.offset() == int64_t(0x0123456789ABCDF0u));
+
+ m = ptr(0x0123456789ABCDEFu, rdi, 4);
+ EXPECT(m.hasBase() == false);
+ EXPECT(m.hasBaseReg() == false);
+ EXPECT(m.hasIndex() == true);
+ EXPECT(m.hasIndexReg() == true);
+ EXPECT(m.indexType() == rdi.type());
+ EXPECT(m.indexId() == rdi.id());
+ EXPECT(m.hasOffset() == true);
+ EXPECT(m.isOffset64Bit() == true);
+ EXPECT(m.offset() == int64_t(0x0123456789ABCDEFu));
+ EXPECT(m.offsetLo32() == int32_t(0x89ABCDEFu));
+ m.resetIndex();
+ EXPECT(m.hasIndex() == false);
+ EXPECT(m.hasIndexReg() == false);
+
+ m = ptr(rax);
+ EXPECT(m.hasBase() == true);
+ EXPECT(m.hasBaseReg() == true);
+ EXPECT(m.baseType() == rax.type());
+ EXPECT(m.baseId() == rax.id());
+ EXPECT(m.hasIndex() == false);
+ EXPECT(m.hasIndexReg() == false);
+ EXPECT(m.indexType() == 0);
+ EXPECT(m.indexId() == 0);
+ EXPECT(m.hasOffset() == false);
+ EXPECT(m.isOffset64Bit() == false);
+ EXPECT(m.offset() == 0);
+ EXPECT(m.offsetLo32() == 0);
+ m.setIndex(rsi);
+ EXPECT(m.hasIndex() == true);
+ EXPECT(m.hasIndexReg() == true);
+ EXPECT(m.indexType() == rsi.type());
+ EXPECT(m.indexId() == rsi.id());
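+
+  // Added checks (editor's example): segment-override and broadcast accessors
+  // defined by x86::Mem in this patch.
+  m = ptr(rax);
+  m.setSegment(fs);
+  EXPECT(m.hasSegment() == true);
+  EXPECT(m.segmentId() == SReg::kIdFs);
+
+  m = ptr(rax)._1to4();
+  EXPECT(m.hasBroadcast() == true);
+  EXPECT(m.getBroadcast() == Mem::kBroadcast1To4);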
+}
+#endif
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86operand.h b/3rdparty/asmjit/src/asmjit/x86/x86operand.h
new file mode 100644
index 00000000000..7e220214271
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86operand.h
@@ -0,0 +1,1060 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86OPERAND_H_INCLUDED
+#define ASMJIT_X86_X86OPERAND_H_INCLUDED
+
+#include "../core/arch.h"
+#include "../core/operand.h"
+#include "../core/type.h"
+#include "../x86/x86globals.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [Forward Declarations]
+// ============================================================================
+
+class Reg;
+class Mem;
+
+class Gp;
+class Gpb;
+class GpbLo;
+class GpbHi;
+class Gpw;
+class Gpd;
+class Gpq;
+class Vec;
+class Xmm;
+class Ymm;
+class Zmm;
+class Mm;
+class KReg;
+class SReg;
+class CReg;
+class DReg;
+class St;
+class Bnd;
+class Rip;
+
+//! \addtogroup asmjit_x86
+//! \{
+
+// ============================================================================
+// [asmjit::x86::RegTraits]
+// ============================================================================
+
+//! Register traits (X86).
+//!
+//! Register traits contains information about a particular register type. It's
+//! used by asmjit to setup register information on-the-fly and to populate
+//! tables that contain register information (this way it's possible to change
+//! register types and groups without having to reorder these tables).
+template<uint32_t REG_TYPE>
+struct RegTraits : public BaseRegTraits {};
+
+//! \cond
+// <--------------------+-----+-------------------------+------------------------+---+---+----------------+
+// | Reg | Reg-Type | Reg-Group |Sz |Cnt| TypeId |
+// <--------------------+-----+-------------------------+------------------------+---+---+----------------+
+ASMJIT_DEFINE_REG_TRAITS(GpbLo, BaseReg::kTypeGp8Lo , BaseReg::kGroupGp , 1 , 16, Type::kIdI8 );
+ASMJIT_DEFINE_REG_TRAITS(GpbHi, BaseReg::kTypeGp8Hi , BaseReg::kGroupGp , 1 , 4 , Type::kIdI8 );
+ASMJIT_DEFINE_REG_TRAITS(Gpw , BaseReg::kTypeGp16 , BaseReg::kGroupGp , 2 , 16, Type::kIdI16 );
+ASMJIT_DEFINE_REG_TRAITS(Gpd , BaseReg::kTypeGp32 , BaseReg::kGroupGp , 4 , 16, Type::kIdI32 );
+ASMJIT_DEFINE_REG_TRAITS(Gpq , BaseReg::kTypeGp64 , BaseReg::kGroupGp , 8 , 16, Type::kIdI64 );
+ASMJIT_DEFINE_REG_TRAITS(Xmm , BaseReg::kTypeVec128 , BaseReg::kGroupVec , 16, 32, Type::kIdI32x4 );
+ASMJIT_DEFINE_REG_TRAITS(Ymm , BaseReg::kTypeVec256 , BaseReg::kGroupVec , 32, 32, Type::kIdI32x8 );
+ASMJIT_DEFINE_REG_TRAITS(Zmm , BaseReg::kTypeVec512 , BaseReg::kGroupVec , 64, 32, Type::kIdI32x16);
+ASMJIT_DEFINE_REG_TRAITS(Mm , BaseReg::kTypeOther0 , BaseReg::kGroupOther0 , 8 , 8 , Type::kIdMmx64 );
+ASMJIT_DEFINE_REG_TRAITS(KReg , BaseReg::kTypeOther1 , BaseReg::kGroupOther1 , 0 , 8 , Type::kIdVoid );
+ASMJIT_DEFINE_REG_TRAITS(SReg , BaseReg::kTypeCustom + 0, BaseReg::kGroupVirt + 0, 2 , 7 , Type::kIdVoid );
+ASMJIT_DEFINE_REG_TRAITS(CReg , BaseReg::kTypeCustom + 1, BaseReg::kGroupVirt + 1, 0 , 16, Type::kIdVoid );
+ASMJIT_DEFINE_REG_TRAITS(DReg , BaseReg::kTypeCustom + 2, BaseReg::kGroupVirt + 2, 0 , 16, Type::kIdVoid );
+ASMJIT_DEFINE_REG_TRAITS(St , BaseReg::kTypeCustom + 3, BaseReg::kGroupVirt + 3, 10, 8 , Type::kIdF80 );
+ASMJIT_DEFINE_REG_TRAITS(Bnd , BaseReg::kTypeCustom + 4, BaseReg::kGroupVirt + 4, 16, 4 , Type::kIdVoid );
+ASMJIT_DEFINE_REG_TRAITS(Rip , BaseReg::kTypeIP , BaseReg::kGroupVirt + 5, 0 , 1 , Type::kIdVoid );
+//! \endcond
+
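+// Illustrative sketch (not upstream): the specializations above are usable in
+// compile-time contexts, using only trait members already referenced in this
+// file:
+//
+//   static_assert(RegTraits<BaseReg::kTypeGp64>::kCount == 16, "16 GPQ regs");
+//   static_assert(RegTraits<BaseReg::kTypeVec128>::kTypeId == Type::kIdI32x4,
+//                 "XMM maps to I32x4");
+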
+// ============================================================================
+// [asmjit::x86::Reg]
+// ============================================================================
+
+//! Register (X86).
+class Reg : public BaseReg {
+public:
+ ASMJIT_DEFINE_ABSTRACT_REG(Reg, BaseReg)
+
+ //! Register type.
+ enum RegType : uint32_t {
+ kTypeNone = BaseReg::kTypeNone, //!< No register type or invalid register.
+ kTypeGpbLo = BaseReg::kTypeGp8Lo, //!< Low GPB register (AL, BL, CL, DL, ...).
+ kTypeGpbHi = BaseReg::kTypeGp8Hi, //!< High GPB register (AH, BH, CH, DH only).
+ kTypeGpw = BaseReg::kTypeGp16, //!< GPW register.
+ kTypeGpd = BaseReg::kTypeGp32, //!< GPD register.
+ kTypeGpq = BaseReg::kTypeGp64, //!< GPQ register (64-bit).
+ kTypeXmm = BaseReg::kTypeVec128, //!< XMM register (SSE+).
+ kTypeYmm = BaseReg::kTypeVec256, //!< YMM register (AVX+).
+ kTypeZmm = BaseReg::kTypeVec512, //!< ZMM register (AVX512+).
+ kTypeMm = BaseReg::kTypeOther0, //!< MMX register.
+ kTypeKReg = BaseReg::kTypeOther1, //!< K register (AVX512+).
+ kTypeSReg = BaseReg::kTypeCustom+0, //!< Segment register (None, ES, CS, SS, DS, FS, GS).
+ kTypeCReg = BaseReg::kTypeCustom+1, //!< Control register (CR).
+ kTypeDReg = BaseReg::kTypeCustom+2, //!< Debug register (DR).
+ kTypeSt = BaseReg::kTypeCustom+3, //!< FPU (x87) register.
+ kTypeBnd = BaseReg::kTypeCustom+4, //!< Bound register (BND).
+ kTypeRip = BaseReg::kTypeIP, //!< Instruction pointer (EIP, RIP).
+ kTypeCount = BaseReg::kTypeCustom+5 //!< Count of register types.
+ };
+
+ //! Register group.
+ enum RegGroup : uint32_t {
+ kGroupGp = BaseReg::kGroupGp, //!< GP register group or none (universal).
+ kGroupVec = BaseReg::kGroupVec, //!< XMM|YMM|ZMM register group (universal).
+ kGroupMm = BaseReg::kGroupOther0, //!< MMX register group (legacy).
+ kGroupKReg = BaseReg::kGroupOther1, //!< K register group.
+
+ // These are not managed by BaseCompiler nor used by Func-API:
+ kGroupSReg = BaseReg::kGroupVirt+0, //!< Segment register group.
+ kGroupCReg = BaseReg::kGroupVirt+1, //!< Control register group.
+ kGroupDReg = BaseReg::kGroupVirt+2, //!< Debug register group.
+ kGroupSt = BaseReg::kGroupVirt+3, //!< FPU register group.
+ kGroupBnd = BaseReg::kGroupVirt+4, //!< Bound register group.
+    kGroupRip  = BaseReg::kGroupVirt+5,  //!< Instruction pointer (IP).
+ kGroupCount //!< Count of all register groups.
+ };
+
+ //! Tests whether the register is a GPB register (8-bit).
+ constexpr bool isGpb() const noexcept { return size() == 1; }
+ //! Tests whether the register is a low GPB register (8-bit).
+ constexpr bool isGpbLo() const noexcept { return hasSignature(RegTraits<kTypeGpbLo>::kSignature); }
+ //! Tests whether the register is a high GPB register (8-bit).
+ constexpr bool isGpbHi() const noexcept { return hasSignature(RegTraits<kTypeGpbHi>::kSignature); }
+ //! Tests whether the register is a GPW register (16-bit).
+ constexpr bool isGpw() const noexcept { return hasSignature(RegTraits<kTypeGpw>::kSignature); }
+ //! Tests whether the register is a GPD register (32-bit).
+ constexpr bool isGpd() const noexcept { return hasSignature(RegTraits<kTypeGpd>::kSignature); }
+ //! Tests whether the register is a GPQ register (64-bit).
+ constexpr bool isGpq() const noexcept { return hasSignature(RegTraits<kTypeGpq>::kSignature); }
+ //! Tests whether the register is an XMM register (128-bit).
+ constexpr bool isXmm() const noexcept { return hasSignature(RegTraits<kTypeXmm>::kSignature); }
+ //! Tests whether the register is a YMM register (256-bit).
+ constexpr bool isYmm() const noexcept { return hasSignature(RegTraits<kTypeYmm>::kSignature); }
+ //! Tests whether the register is a ZMM register (512-bit).
+ constexpr bool isZmm() const noexcept { return hasSignature(RegTraits<kTypeZmm>::kSignature); }
+ //! Tests whether the register is an MMX register (64-bit).
+ constexpr bool isMm() const noexcept { return hasSignature(RegTraits<kTypeMm>::kSignature); }
+ //! Tests whether the register is a K register (64-bit).
+ constexpr bool isKReg() const noexcept { return hasSignature(RegTraits<kTypeKReg>::kSignature); }
+ //! Tests whether the register is a segment register.
+ constexpr bool isSReg() const noexcept { return hasSignature(RegTraits<kTypeSReg>::kSignature); }
+ //! Tests whether the register is a control register.
+ constexpr bool isCReg() const noexcept { return hasSignature(RegTraits<kTypeCReg>::kSignature); }
+ //! Tests whether the register is a debug register.
+ constexpr bool isDReg() const noexcept { return hasSignature(RegTraits<kTypeDReg>::kSignature); }
+ //! Tests whether the register is an FPU register (80-bit).
+ constexpr bool isSt() const noexcept { return hasSignature(RegTraits<kTypeSt>::kSignature); }
+ //! Tests whether the register is a bound register.
+ constexpr bool isBnd() const noexcept { return hasSignature(RegTraits<kTypeBnd>::kSignature); }
+ //! Tests whether the register is RIP.
+ constexpr bool isRip() const noexcept { return hasSignature(RegTraits<kTypeRip>::kSignature); }
+
+ template<uint32_t REG_TYPE>
+ inline void setRegT(uint32_t rId) noexcept {
+ setSignature(RegTraits<REG_TYPE>::kSignature);
+ setId(rId);
+ }
+
+ inline void setTypeAndId(uint32_t rType, uint32_t rId) noexcept {
+ ASMJIT_ASSERT(rType < kTypeCount);
+ setSignature(signatureOf(rType));
+ setId(rId);
+ }
+
+ static inline uint32_t groupOf(uint32_t rType) noexcept;
+ template<uint32_t REG_TYPE>
+ static inline uint32_t groupOfT() noexcept { return RegTraits<REG_TYPE>::kGroup; }
+
+ static inline uint32_t typeIdOf(uint32_t rType) noexcept;
+ template<uint32_t REG_TYPE>
+ static inline uint32_t typeIdOfT() noexcept { return RegTraits<REG_TYPE>::kTypeId; }
+
+ static inline uint32_t signatureOf(uint32_t rType) noexcept;
+ template<uint32_t REG_TYPE>
+ static inline uint32_t signatureOfT() noexcept { return RegTraits<REG_TYPE>::kSignature; }
+
+ static inline uint32_t signatureOfVecByType(uint32_t typeId) noexcept {
+ return typeId <= Type::_kIdVec128End ? RegTraits<kTypeXmm>::kSignature :
+ typeId <= Type::_kIdVec256End ? RegTraits<kTypeYmm>::kSignature : RegTraits<kTypeZmm>::kSignature;
+ }
+
+ static inline uint32_t signatureOfVecBySize(uint32_t size) noexcept {
+ return size <= 16 ? RegTraits<kTypeXmm>::kSignature :
+ size <= 32 ? RegTraits<kTypeYmm>::kSignature : RegTraits<kTypeZmm>::kSignature;
+ }
+
+ //! Tests whether the `op` operand is either a low or high 8-bit GPB register.
+ static inline bool isGpb(const Operand_& op) noexcept {
+    // Checks only the operand type and size - among X86 registers a size of 1
+    // implies a GPB register, so the register type itself is not needed.
+ const uint32_t kSgn = (Operand::kOpReg << kSignatureOpShift ) |
+ (1 << kSignatureSizeShift) ;
+ return (op.signature() & (kSignatureOpMask | kSignatureSizeMask)) == kSgn;
+ }
+
+ static inline bool isGpbLo(const Operand_& op) noexcept { return op.as<Reg>().isGpbLo(); }
+ static inline bool isGpbHi(const Operand_& op) noexcept { return op.as<Reg>().isGpbHi(); }
+ static inline bool isGpw(const Operand_& op) noexcept { return op.as<Reg>().isGpw(); }
+ static inline bool isGpd(const Operand_& op) noexcept { return op.as<Reg>().isGpd(); }
+ static inline bool isGpq(const Operand_& op) noexcept { return op.as<Reg>().isGpq(); }
+ static inline bool isXmm(const Operand_& op) noexcept { return op.as<Reg>().isXmm(); }
+ static inline bool isYmm(const Operand_& op) noexcept { return op.as<Reg>().isYmm(); }
+ static inline bool isZmm(const Operand_& op) noexcept { return op.as<Reg>().isZmm(); }
+ static inline bool isMm(const Operand_& op) noexcept { return op.as<Reg>().isMm(); }
+ static inline bool isKReg(const Operand_& op) noexcept { return op.as<Reg>().isKReg(); }
+ static inline bool isSReg(const Operand_& op) noexcept { return op.as<Reg>().isSReg(); }
+ static inline bool isCReg(const Operand_& op) noexcept { return op.as<Reg>().isCReg(); }
+ static inline bool isDReg(const Operand_& op) noexcept { return op.as<Reg>().isDReg(); }
+ static inline bool isSt(const Operand_& op) noexcept { return op.as<Reg>().isSt(); }
+ static inline bool isBnd(const Operand_& op) noexcept { return op.as<Reg>().isBnd(); }
+ static inline bool isRip(const Operand_& op) noexcept { return op.as<Reg>().isRip(); }
+
+ static inline bool isGpb(const Operand_& op, uint32_t rId) noexcept { return isGpb(op) & (op.id() == rId); }
+ static inline bool isGpbLo(const Operand_& op, uint32_t rId) noexcept { return isGpbLo(op) & (op.id() == rId); }
+ static inline bool isGpbHi(const Operand_& op, uint32_t rId) noexcept { return isGpbHi(op) & (op.id() == rId); }
+ static inline bool isGpw(const Operand_& op, uint32_t rId) noexcept { return isGpw(op) & (op.id() == rId); }
+ static inline bool isGpd(const Operand_& op, uint32_t rId) noexcept { return isGpd(op) & (op.id() == rId); }
+ static inline bool isGpq(const Operand_& op, uint32_t rId) noexcept { return isGpq(op) & (op.id() == rId); }
+ static inline bool isXmm(const Operand_& op, uint32_t rId) noexcept { return isXmm(op) & (op.id() == rId); }
+ static inline bool isYmm(const Operand_& op, uint32_t rId) noexcept { return isYmm(op) & (op.id() == rId); }
+ static inline bool isZmm(const Operand_& op, uint32_t rId) noexcept { return isZmm(op) & (op.id() == rId); }
+ static inline bool isMm(const Operand_& op, uint32_t rId) noexcept { return isMm(op) & (op.id() == rId); }
+ static inline bool isKReg(const Operand_& op, uint32_t rId) noexcept { return isKReg(op) & (op.id() == rId); }
+ static inline bool isSReg(const Operand_& op, uint32_t rId) noexcept { return isSReg(op) & (op.id() == rId); }
+ static inline bool isCReg(const Operand_& op, uint32_t rId) noexcept { return isCReg(op) & (op.id() == rId); }
+ static inline bool isDReg(const Operand_& op, uint32_t rId) noexcept { return isDReg(op) & (op.id() == rId); }
+ static inline bool isSt(const Operand_& op, uint32_t rId) noexcept { return isSt(op) & (op.id() == rId); }
+ static inline bool isBnd(const Operand_& op, uint32_t rId) noexcept { return isBnd(op) & (op.id() == rId); }
+ static inline bool isRip(const Operand_& op, uint32_t rId) noexcept { return isRip(op) & (op.id() == rId); }
+};
+
+//! General purpose register (X86).
+class Gp : public Reg {
+public:
+ ASMJIT_DEFINE_ABSTRACT_REG(Gp, Reg)
+
+ //! Physical id (X86).
+ //!
+  //! \note Register indexes have been reduced to only support general purpose
+  //! registers. There is no need for enumerations with a number suffix that
+  //! expand to exactly the same value as the suffix itself.
+ enum Id : uint32_t {
+ kIdAx = 0, //!< Physical id of AL|AH|AX|EAX|RAX registers.
+ kIdCx = 1, //!< Physical id of CL|CH|CX|ECX|RCX registers.
+ kIdDx = 2, //!< Physical id of DL|DH|DX|EDX|RDX registers.
+ kIdBx = 3, //!< Physical id of BL|BH|BX|EBX|RBX registers.
+ kIdSp = 4, //!< Physical id of SPL|SP|ESP|RSP registers.
+ kIdBp = 5, //!< Physical id of BPL|BP|EBP|RBP registers.
+ kIdSi = 6, //!< Physical id of SIL|SI|ESI|RSI registers.
+ kIdDi = 7, //!< Physical id of DIL|DI|EDI|RDI registers.
+ kIdR8 = 8, //!< Physical id of R8B|R8W|R8D|R8 registers (64-bit only).
+ kIdR9 = 9, //!< Physical id of R9B|R9W|R9D|R9 registers (64-bit only).
+ kIdR10 = 10, //!< Physical id of R10B|R10W|R10D|R10 registers (64-bit only).
+ kIdR11 = 11, //!< Physical id of R11B|R11W|R11D|R11 registers (64-bit only).
+ kIdR12 = 12, //!< Physical id of R12B|R12W|R12D|R12 registers (64-bit only).
+ kIdR13 = 13, //!< Physical id of R13B|R13W|R13D|R13 registers (64-bit only).
+ kIdR14 = 14, //!< Physical id of R14B|R14W|R14D|R14 registers (64-bit only).
+ kIdR15 = 15 //!< Physical id of R15B|R15W|R15D|R15 registers (64-bit only).
+ };
+
+ //! Casts this register to 8-bit (LO) part.
+ inline GpbLo r8() const noexcept;
+ //! Casts this register to 8-bit (LO) part.
+ inline GpbLo r8Lo() const noexcept;
+ //! Casts this register to 8-bit (HI) part.
+ inline GpbHi r8Hi() const noexcept;
+ //! Casts this register to 16-bit.
+ inline Gpw r16() const noexcept;
+ //! Casts this register to 32-bit.
+ inline Gpd r32() const noexcept;
+ //! Casts this register to 64-bit.
+ inline Gpq r64() const noexcept;
+};
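+
+// Illustrative sketch (not upstream): the casts above keep the register id and
+// only change the signature, e.g. using the constants from `x86::regs` below:
+//
+//   Gp r = rcx;  // 64-bit RCX.
+//   r.r32();     // ECX - same id (1), 32-bit signature.
+//   r.r8();      // CL.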
+
+//! Vector register (XMM|YMM|ZMM) (X86).
+class Vec : public Reg {
+ ASMJIT_DEFINE_ABSTRACT_REG(Vec, Reg)
+
+ //! Casts this register to XMM (clone).
+ inline Xmm xmm() const noexcept;
+ //! Casts this register to YMM.
+ inline Ymm ymm() const noexcept;
+ //! Casts this register to ZMM.
+ inline Zmm zmm() const noexcept;
+
+ //! Casts this register to a register that has half the size (or XMM if it's already XMM).
+ inline Vec half() const noexcept {
+ return Vec(type() == kTypeZmm ? signatureOf(kTypeYmm) : signatureOf(kTypeXmm), id());
+ }
+};
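+
+// Illustrative sketch (not upstream), mirroring the unit test in
+// x86operand.cpp: cloneAs() takes the type of the passed register but keeps
+// the id, and half() steps one vector width down:
+//
+//   xmm4.cloneAs(ymm10); // == ymm4
+//   zmm6.half();         // == ymm6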
+
+//! Segment register (X86).
+class SReg : public Reg {
+ ASMJIT_DEFINE_FINAL_REG(SReg, Reg, RegTraits<kTypeSReg>)
+
+ //! X86 segment id.
+ enum Id : uint32_t {
+ kIdNone = 0, //!< No segment (default).
+ kIdEs = 1, //!< ES segment.
+ kIdCs = 2, //!< CS segment.
+ kIdSs = 3, //!< SS segment.
+ kIdDs = 4, //!< DS segment.
+ kIdFs = 5, //!< FS segment.
+ kIdGs = 6, //!< GS segment.
+
+ //! Count of segment registers supported by AsmJit.
+ //!
+    //! \note The X86 architecture has 6 segment registers - ES, CS, SS, DS,
+    //! FS, and GS. The X64 architecture uses only FS and GS. AsmJit supports
+    //! 7 segment registers - the 6 addressable in both X86 and X64 modes and
+    //! one extra called `SReg::kIdNone`, which is AsmJit specific and means
+    //! that no segment register is specified.
+ kIdCount = 7
+ };
+};
+
+//! GPB low or high register (X86).
+class Gpb : public Gp { ASMJIT_DEFINE_ABSTRACT_REG(Gpb, Gp) };
+//! GPB low register (X86).
+class GpbLo : public Gpb { ASMJIT_DEFINE_FINAL_REG(GpbLo, Gpb, RegTraits<kTypeGpbLo>) };
+//! GPB high register (X86).
+class GpbHi : public Gpb { ASMJIT_DEFINE_FINAL_REG(GpbHi, Gpb, RegTraits<kTypeGpbHi>) };
+//! GPW register (X86).
+class Gpw : public Gp { ASMJIT_DEFINE_FINAL_REG(Gpw, Gp, RegTraits<kTypeGpw>) };
+//! GPD register (X86).
+class Gpd : public Gp { ASMJIT_DEFINE_FINAL_REG(Gpd, Gp, RegTraits<kTypeGpd>) };
+//! GPQ register (X86_64).
+class Gpq : public Gp { ASMJIT_DEFINE_FINAL_REG(Gpq, Gp, RegTraits<kTypeGpq>) };
+
+//! 128-bit XMM register (SSE+).
+class Xmm : public Vec {
+ ASMJIT_DEFINE_FINAL_REG(Xmm, Vec, RegTraits<kTypeXmm>)
+ //! Casts this register to a register that has half the size (XMM).
+ inline Xmm half() const noexcept { return Xmm(id()); }
+};
+
+//! 256-bit YMM register (AVX+).
+class Ymm : public Vec {
+ ASMJIT_DEFINE_FINAL_REG(Ymm, Vec, RegTraits<kTypeYmm>)
+ //! Casts this register to a register that has half the size (XMM).
+ inline Xmm half() const noexcept { return Xmm(id()); }
+};
+
+//! 512-bit ZMM register (AVX512+).
+class Zmm : public Vec {
+ ASMJIT_DEFINE_FINAL_REG(Zmm, Vec, RegTraits<kTypeZmm>)
+ //! Casts this register to a register that has half the size (YMM).
+ inline Ymm half() const noexcept { return Ymm(id()); }
+};
+
+//! 64-bit MMX register (MMX+).
+class Mm : public Reg { ASMJIT_DEFINE_FINAL_REG(Mm, Reg, RegTraits<kTypeMm>) };
+//! 64-bit K register (AVX512+).
+class KReg : public Reg { ASMJIT_DEFINE_FINAL_REG(KReg, Reg, RegTraits<kTypeKReg>) };
+//! 32-bit or 64-bit control register (X86).
+class CReg : public Reg { ASMJIT_DEFINE_FINAL_REG(CReg, Reg, RegTraits<kTypeCReg>) };
+//! 32-bit or 64-bit debug register (X86).
+class DReg : public Reg { ASMJIT_DEFINE_FINAL_REG(DReg, Reg, RegTraits<kTypeDReg>) };
+//! 80-bit FPU register (X86).
+class St : public Reg { ASMJIT_DEFINE_FINAL_REG(St, Reg, RegTraits<kTypeSt>) };
+//! 128-bit BND register (BND+).
+class Bnd : public Reg { ASMJIT_DEFINE_FINAL_REG(Bnd, Reg, RegTraits<kTypeBnd>) };
+//! RIP register (X86).
+class Rip : public Reg { ASMJIT_DEFINE_FINAL_REG(Rip, Reg, RegTraits<kTypeRip>) };
+
+//! \cond
+inline GpbLo Gp::r8() const noexcept { return GpbLo(id()); }
+inline GpbLo Gp::r8Lo() const noexcept { return GpbLo(id()); }
+inline GpbHi Gp::r8Hi() const noexcept { return GpbHi(id()); }
+inline Gpw Gp::r16() const noexcept { return Gpw(id()); }
+inline Gpd Gp::r32() const noexcept { return Gpd(id()); }
+inline Gpq Gp::r64() const noexcept { return Gpq(id()); }
+inline Xmm Vec::xmm() const noexcept { return Xmm(id()); }
+inline Ymm Vec::ymm() const noexcept { return Ymm(id()); }
+inline Zmm Vec::zmm() const noexcept { return Zmm(id()); }
+//! \endcond
+
+// ============================================================================
+// [asmjit::x86::Mem]
+// ============================================================================
+
+//! Memory operand.
+class Mem : public BaseMem {
+public:
+ //! Additional bits of operand's signature used by `Mem`.
+ enum AdditionalBits : uint32_t {
+ kSignatureMemSegmentShift = 16,
+ kSignatureMemSegmentMask = 0x07u << kSignatureMemSegmentShift,
+
+ kSignatureMemShiftShift = 19,
+ kSignatureMemShiftMask = 0x03u << kSignatureMemShiftShift,
+
+ kSignatureMemBroadcastShift = 21,
+ kSignatureMemBroadcastMask = 0x7u << kSignatureMemBroadcastShift
+ };
+
+ enum Broadcast : uint32_t {
+ kBroadcast1To1 = 0,
+ kBroadcast1To2 = 1,
+ kBroadcast1To4 = 2,
+ kBroadcast1To8 = 3,
+ kBroadcast1To16 = 4,
+ kBroadcast1To32 = 5,
+ kBroadcast1To64 = 6
+ };
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ //! Creates a default `Mem` operand that points to [0].
+ constexpr Mem() noexcept
+ : BaseMem() {}
+
+ constexpr Mem(const Mem& other) noexcept
+ : BaseMem(other) {}
+
+ //! \cond INTERNAL
+ //!
+ //! A constructor used internally to create `Mem` operand from `Decomposed` data.
+ constexpr explicit Mem(const Decomposed& d) noexcept
+ : BaseMem(d) {}
+ //! \endcond
+
+ constexpr Mem(const Label& base, int32_t off, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { Label::kLabelTag, base.id(), 0, 0, off, size, flags }) {}
+
+ constexpr Mem(const Label& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { Label::kLabelTag, base.id(), index.type(), index.id(), off, size, flags | (shift << kSignatureMemShiftShift) }) {}
+
+ constexpr Mem(const BaseReg& base, int32_t off, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { base.type(), base.id(), 0, 0, off, size, flags }) {}
+
+ constexpr Mem(const BaseReg& base, const BaseReg& index, uint32_t shift, int32_t off, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { base.type(), base.id(), index.type(), index.id(), off, size, flags | (shift << kSignatureMemShiftShift) }) {}
+
+ constexpr explicit Mem(uint64_t base, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { 0, uint32_t(base >> 32), 0, 0, int32_t(uint32_t(base & 0xFFFFFFFFu)), size, flags }) {}
+
+ constexpr Mem(uint64_t base, const BaseReg& index, uint32_t shift = 0, uint32_t size = 0, uint32_t flags = 0) noexcept
+ : BaseMem(Decomposed { 0, uint32_t(base >> 32), index.type(), index.id(), int32_t(uint32_t(base & 0xFFFFFFFFu)), size, flags | (shift << kSignatureMemShiftShift) }) {}
+
+ constexpr Mem(Globals::Init_, uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) noexcept
+ : BaseMem(Globals::Init, u0, u1, u2, u3) {}
+
+ inline explicit Mem(Globals::NoInit_) noexcept
+ : BaseMem(Globals::NoInit) {}
+
+ //! Clones the memory operand.
+ constexpr Mem clone() const noexcept { return Mem(*this); }
+
+ //! Creates a new copy of this memory operand adjusted by `off`.
+ inline Mem cloneAdjusted(int64_t off) const noexcept {
+ Mem result(*this);
+ result.addOffset(off);
+ return result;
+ }
+
+ //! Converts memory `baseType` and `baseId` to `x86::Reg` instance.
+ //!
+  //! The memory must have a valid base register; otherwise the result will be wrong.
+ inline Reg baseReg() const noexcept { return Reg::fromTypeAndId(baseType(), baseId()); }
+
+ //! Converts memory `indexType` and `indexId` to `x86::Reg` instance.
+ //!
+  //! The memory must have a valid index register; otherwise the result will be wrong.
+ inline Reg indexReg() const noexcept { return Reg::fromTypeAndId(indexType(), indexId()); }
+
+ constexpr Mem _1to1() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To1 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to2() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To2 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to4() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To4 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to8() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To8 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to16() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To16 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to32() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To32 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+ constexpr Mem _1to64() const noexcept { return Mem(Globals::Init, (_signature & ~kSignatureMemBroadcastMask) | (kBroadcast1To64 << kSignatureMemBroadcastShift), _baseId, _data[0], _data[1]); }
+
+ // --------------------------------------------------------------------------
+ // [Mem]
+ // --------------------------------------------------------------------------
+
+ using BaseMem::setIndex;
+
+ inline void setIndex(const BaseReg& index, uint32_t shift) noexcept {
+ setIndex(index);
+ setShift(shift);
+ }
+
+ //! Tests whether the memory operand has a segment override.
+ constexpr bool hasSegment() const noexcept { return _hasSignaturePart<kSignatureMemSegmentMask>(); }
+ //! Returns the associated segment override as `SReg` operand.
+ constexpr SReg segment() const noexcept { return SReg(segmentId()); }
+ //! Returns segment override register id, see `SReg::Id`.
+ constexpr uint32_t segmentId() const noexcept { return _getSignaturePart<kSignatureMemSegmentMask>(); }
+
+ //! Sets the segment override to `seg`.
+ inline void setSegment(const SReg& seg) noexcept { setSegment(seg.id()); }
+ //! Sets the segment override to `id`.
+ inline void setSegment(uint32_t rId) noexcept { _setSignaturePart<kSignatureMemSegmentMask>(rId); }
+ //! Resets the segment override.
+ inline void resetSegment() noexcept { _setSignaturePart<kSignatureMemSegmentMask>(0); }
+
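+  // Illustrative sketch (not upstream): a segment override composes with any
+  // memory operand:
+  //
+  //   Mem m = ptr(rax); // [rax]
+  //   m.setSegment(fs); // fs:[rax]
+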
+  //! Tests whether the memory operand has a shift (aka scale) value.
+ constexpr bool hasShift() const noexcept { return _hasSignaturePart<kSignatureMemShiftMask>(); }
+ //! Returns the memory operand's shift (aka scale) value.
+ constexpr uint32_t shift() const noexcept { return _getSignaturePart<kSignatureMemShiftMask>(); }
+ //! Sets the memory operand's shift (aka scale) value.
+ inline void setShift(uint32_t shift) noexcept { _setSignaturePart<kSignatureMemShiftMask>(shift); }
+ //! Resets the memory operand's shift (aka scale) value to zero.
+ inline void resetShift() noexcept { _setSignaturePart<kSignatureMemShiftMask>(0); }
+
+  //! Tests whether the memory operand has a {1toN} broadcast.
+ constexpr bool hasBroadcast() const noexcept { return _hasSignaturePart<kSignatureMemBroadcastMask>(); }
+ //! Returns the memory operand's broadcast.
+ constexpr uint32_t getBroadcast() const noexcept { return _getSignaturePart<kSignatureMemBroadcastMask>(); }
+ //! Sets the memory operand's broadcast.
+ inline void setBroadcast(uint32_t bcst) noexcept { _setSignaturePart<kSignatureMemBroadcastMask>(bcst); }
+ //! Resets the memory operand's broadcast to none.
+ inline void resetBroadcast() noexcept { _setSignaturePart<kSignatureMemBroadcastMask>(0); }
+
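+  // Illustrative sketch (not upstream): {1toN} marks the operand for EVEX
+  // embedded broadcast, e.g. with a hypothetical x86::Assembler instance `a`:
+  //
+  //   a.vaddps(zmm0, zmm1, dword_ptr(rax)._1to16()); // zmm0 = zmm1 + [rax]{1to16}
+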
+ // --------------------------------------------------------------------------
+ // [Operator Overload]
+ // --------------------------------------------------------------------------
+
+ inline Mem& operator=(const Mem& other) noexcept = default;
+};
+
+// ============================================================================
+// [asmjit::x86::OpData]
+// ============================================================================
+
+struct OpData {
+ //! Information about all architecture registers.
+ ArchRegs archRegs;
+};
+ASMJIT_VARAPI const OpData opData;
+
+//! \cond
+// ... Reg methods that require `opData`.
+inline uint32_t Reg::groupOf(uint32_t rType) noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return opData.archRegs.regInfo[rType].group();
+}
+
+inline uint32_t Reg::typeIdOf(uint32_t rType) noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return opData.archRegs.regTypeToTypeId[rType];
+}
+
+inline uint32_t Reg::signatureOf(uint32_t rType) noexcept {
+ ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
+ return opData.archRegs.regInfo[rType].signature();
+}
+//! \endcond
+
+// ============================================================================
+// [asmjit::x86::regs]
+// ============================================================================
+
+namespace regs {
+
+//! Creates an 8-bit low GPB register operand.
+static constexpr GpbLo gpb(uint32_t rId) noexcept { return GpbLo(rId); }
+//! Creates an 8-bit low GPB register operand.
+static constexpr GpbLo gpb_lo(uint32_t rId) noexcept { return GpbLo(rId); }
+//! Creates an 8-bit high GPB register operand.
+static constexpr GpbHi gpb_hi(uint32_t rId) noexcept { return GpbHi(rId); }
+//! Creates a 16-bit GPW register operand.
+static constexpr Gpw gpw(uint32_t rId) noexcept { return Gpw(rId); }
+//! Creates a 32-bit GPD register operand.
+static constexpr Gpd gpd(uint32_t rId) noexcept { return Gpd(rId); }
+//! Creates a 64-bit GPQ register operand (X86_64).
+static constexpr Gpq gpq(uint32_t rId) noexcept { return Gpq(rId); }
+//! Creates a 128-bit XMM register operand.
+static constexpr Xmm xmm(uint32_t rId) noexcept { return Xmm(rId); }
+//! Creates a 256-bit YMM register operand.
+static constexpr Ymm ymm(uint32_t rId) noexcept { return Ymm(rId); }
+//! Creates a 512-bit ZMM register operand.
+static constexpr Zmm zmm(uint32_t rId) noexcept { return Zmm(rId); }
+//! Creates a 64-bit Mm register operand.
+static constexpr Mm mm(uint32_t rId) noexcept { return Mm(rId); }
+//! Creates a 64-bit K register operand.
+static constexpr KReg k(uint32_t rId) noexcept { return KReg(rId); }
+//! Creates a 32-bit or 64-bit control register operand.
+static constexpr CReg cr(uint32_t rId) noexcept { return CReg(rId); }
+//! Creates a 32-bit or 64-bit debug register operand.
+static constexpr DReg dr(uint32_t rId) noexcept { return DReg(rId); }
+//! Creates an 80-bit st register operand.
+static constexpr St st(uint32_t rId) noexcept { return St(rId); }
+//! Creates a 128-bit bound register operand.
+static constexpr Bnd bnd(uint32_t rId) noexcept { return Bnd(rId); }
+
+static constexpr Gp al(GpbLo::kSignature, Gp::kIdAx);
+static constexpr Gp bl(GpbLo::kSignature, Gp::kIdBx);
+static constexpr Gp cl(GpbLo::kSignature, Gp::kIdCx);
+static constexpr Gp dl(GpbLo::kSignature, Gp::kIdDx);
+static constexpr Gp spl(GpbLo::kSignature, Gp::kIdSp);
+static constexpr Gp bpl(GpbLo::kSignature, Gp::kIdBp);
+static constexpr Gp sil(GpbLo::kSignature, Gp::kIdSi);
+static constexpr Gp dil(GpbLo::kSignature, Gp::kIdDi);
+static constexpr Gp r8b(GpbLo::kSignature, Gp::kIdR8);
+static constexpr Gp r9b(GpbLo::kSignature, Gp::kIdR9);
+static constexpr Gp r10b(GpbLo::kSignature, Gp::kIdR10);
+static constexpr Gp r11b(GpbLo::kSignature, Gp::kIdR11);
+static constexpr Gp r12b(GpbLo::kSignature, Gp::kIdR12);
+static constexpr Gp r13b(GpbLo::kSignature, Gp::kIdR13);
+static constexpr Gp r14b(GpbLo::kSignature, Gp::kIdR14);
+static constexpr Gp r15b(GpbLo::kSignature, Gp::kIdR15);
+
+static constexpr Gp ah(GpbHi::kSignature, Gp::kIdAx);
+static constexpr Gp bh(GpbHi::kSignature, Gp::kIdBx);
+static constexpr Gp ch(GpbHi::kSignature, Gp::kIdCx);
+static constexpr Gp dh(GpbHi::kSignature, Gp::kIdDx);
+
+static constexpr Gp ax(Gpw::kSignature, Gp::kIdAx);
+static constexpr Gp bx(Gpw::kSignature, Gp::kIdBx);
+static constexpr Gp cx(Gpw::kSignature, Gp::kIdCx);
+static constexpr Gp dx(Gpw::kSignature, Gp::kIdDx);
+static constexpr Gp sp(Gpw::kSignature, Gp::kIdSp);
+static constexpr Gp bp(Gpw::kSignature, Gp::kIdBp);
+static constexpr Gp si(Gpw::kSignature, Gp::kIdSi);
+static constexpr Gp di(Gpw::kSignature, Gp::kIdDi);
+static constexpr Gp r8w(Gpw::kSignature, Gp::kIdR8);
+static constexpr Gp r9w(Gpw::kSignature, Gp::kIdR9);
+static constexpr Gp r10w(Gpw::kSignature, Gp::kIdR10);
+static constexpr Gp r11w(Gpw::kSignature, Gp::kIdR11);
+static constexpr Gp r12w(Gpw::kSignature, Gp::kIdR12);
+static constexpr Gp r13w(Gpw::kSignature, Gp::kIdR13);
+static constexpr Gp r14w(Gpw::kSignature, Gp::kIdR14);
+static constexpr Gp r15w(Gpw::kSignature, Gp::kIdR15);
+
+static constexpr Gp eax(Gpd::kSignature, Gp::kIdAx);
+static constexpr Gp ebx(Gpd::kSignature, Gp::kIdBx);
+static constexpr Gp ecx(Gpd::kSignature, Gp::kIdCx);
+static constexpr Gp edx(Gpd::kSignature, Gp::kIdDx);
+static constexpr Gp esp(Gpd::kSignature, Gp::kIdSp);
+static constexpr Gp ebp(Gpd::kSignature, Gp::kIdBp);
+static constexpr Gp esi(Gpd::kSignature, Gp::kIdSi);
+static constexpr Gp edi(Gpd::kSignature, Gp::kIdDi);
+static constexpr Gp r8d(Gpd::kSignature, Gp::kIdR8);
+static constexpr Gp r9d(Gpd::kSignature, Gp::kIdR9);
+static constexpr Gp r10d(Gpd::kSignature, Gp::kIdR10);
+static constexpr Gp r11d(Gpd::kSignature, Gp::kIdR11);
+static constexpr Gp r12d(Gpd::kSignature, Gp::kIdR12);
+static constexpr Gp r13d(Gpd::kSignature, Gp::kIdR13);
+static constexpr Gp r14d(Gpd::kSignature, Gp::kIdR14);
+static constexpr Gp r15d(Gpd::kSignature, Gp::kIdR15);
+
+static constexpr Gp rax(Gpq::kSignature, Gp::kIdAx);
+static constexpr Gp rbx(Gpq::kSignature, Gp::kIdBx);
+static constexpr Gp rcx(Gpq::kSignature, Gp::kIdCx);
+static constexpr Gp rdx(Gpq::kSignature, Gp::kIdDx);
+static constexpr Gp rsp(Gpq::kSignature, Gp::kIdSp);
+static constexpr Gp rbp(Gpq::kSignature, Gp::kIdBp);
+static constexpr Gp rsi(Gpq::kSignature, Gp::kIdSi);
+static constexpr Gp rdi(Gpq::kSignature, Gp::kIdDi);
+static constexpr Gp r8(Gpq::kSignature, Gp::kIdR8);
+static constexpr Gp r9(Gpq::kSignature, Gp::kIdR9);
+static constexpr Gp r10(Gpq::kSignature, Gp::kIdR10);
+static constexpr Gp r11(Gpq::kSignature, Gp::kIdR11);
+static constexpr Gp r12(Gpq::kSignature, Gp::kIdR12);
+static constexpr Gp r13(Gpq::kSignature, Gp::kIdR13);
+static constexpr Gp r14(Gpq::kSignature, Gp::kIdR14);
+static constexpr Gp r15(Gpq::kSignature, Gp::kIdR15);
+
+static constexpr Xmm xmm0(0);
+static constexpr Xmm xmm1(1);
+static constexpr Xmm xmm2(2);
+static constexpr Xmm xmm3(3);
+static constexpr Xmm xmm4(4);
+static constexpr Xmm xmm5(5);
+static constexpr Xmm xmm6(6);
+static constexpr Xmm xmm7(7);
+static constexpr Xmm xmm8(8);
+static constexpr Xmm xmm9(9);
+static constexpr Xmm xmm10(10);
+static constexpr Xmm xmm11(11);
+static constexpr Xmm xmm12(12);
+static constexpr Xmm xmm13(13);
+static constexpr Xmm xmm14(14);
+static constexpr Xmm xmm15(15);
+static constexpr Xmm xmm16(16);
+static constexpr Xmm xmm17(17);
+static constexpr Xmm xmm18(18);
+static constexpr Xmm xmm19(19);
+static constexpr Xmm xmm20(20);
+static constexpr Xmm xmm21(21);
+static constexpr Xmm xmm22(22);
+static constexpr Xmm xmm23(23);
+static constexpr Xmm xmm24(24);
+static constexpr Xmm xmm25(25);
+static constexpr Xmm xmm26(26);
+static constexpr Xmm xmm27(27);
+static constexpr Xmm xmm28(28);
+static constexpr Xmm xmm29(29);
+static constexpr Xmm xmm30(30);
+static constexpr Xmm xmm31(31);
+
+static constexpr Ymm ymm0(0);
+static constexpr Ymm ymm1(1);
+static constexpr Ymm ymm2(2);
+static constexpr Ymm ymm3(3);
+static constexpr Ymm ymm4(4);
+static constexpr Ymm ymm5(5);
+static constexpr Ymm ymm6(6);
+static constexpr Ymm ymm7(7);
+static constexpr Ymm ymm8(8);
+static constexpr Ymm ymm9(9);
+static constexpr Ymm ymm10(10);
+static constexpr Ymm ymm11(11);
+static constexpr Ymm ymm12(12);
+static constexpr Ymm ymm13(13);
+static constexpr Ymm ymm14(14);
+static constexpr Ymm ymm15(15);
+static constexpr Ymm ymm16(16);
+static constexpr Ymm ymm17(17);
+static constexpr Ymm ymm18(18);
+static constexpr Ymm ymm19(19);
+static constexpr Ymm ymm20(20);
+static constexpr Ymm ymm21(21);
+static constexpr Ymm ymm22(22);
+static constexpr Ymm ymm23(23);
+static constexpr Ymm ymm24(24);
+static constexpr Ymm ymm25(25);
+static constexpr Ymm ymm26(26);
+static constexpr Ymm ymm27(27);
+static constexpr Ymm ymm28(28);
+static constexpr Ymm ymm29(29);
+static constexpr Ymm ymm30(30);
+static constexpr Ymm ymm31(31);
+
+static constexpr Zmm zmm0(0);
+static constexpr Zmm zmm1(1);
+static constexpr Zmm zmm2(2);
+static constexpr Zmm zmm3(3);
+static constexpr Zmm zmm4(4);
+static constexpr Zmm zmm5(5);
+static constexpr Zmm zmm6(6);
+static constexpr Zmm zmm7(7);
+static constexpr Zmm zmm8(8);
+static constexpr Zmm zmm9(9);
+static constexpr Zmm zmm10(10);
+static constexpr Zmm zmm11(11);
+static constexpr Zmm zmm12(12);
+static constexpr Zmm zmm13(13);
+static constexpr Zmm zmm14(14);
+static constexpr Zmm zmm15(15);
+static constexpr Zmm zmm16(16);
+static constexpr Zmm zmm17(17);
+static constexpr Zmm zmm18(18);
+static constexpr Zmm zmm19(19);
+static constexpr Zmm zmm20(20);
+static constexpr Zmm zmm21(21);
+static constexpr Zmm zmm22(22);
+static constexpr Zmm zmm23(23);
+static constexpr Zmm zmm24(24);
+static constexpr Zmm zmm25(25);
+static constexpr Zmm zmm26(26);
+static constexpr Zmm zmm27(27);
+static constexpr Zmm zmm28(28);
+static constexpr Zmm zmm29(29);
+static constexpr Zmm zmm30(30);
+static constexpr Zmm zmm31(31);
+
+static constexpr Mm mm0(0);
+static constexpr Mm mm1(1);
+static constexpr Mm mm2(2);
+static constexpr Mm mm3(3);
+static constexpr Mm mm4(4);
+static constexpr Mm mm5(5);
+static constexpr Mm mm6(6);
+static constexpr Mm mm7(7);
+
+static constexpr KReg k0(0);
+static constexpr KReg k1(1);
+static constexpr KReg k2(2);
+static constexpr KReg k3(3);
+static constexpr KReg k4(4);
+static constexpr KReg k5(5);
+static constexpr KReg k6(6);
+static constexpr KReg k7(7);
+
+static constexpr SReg no_seg(SReg::kIdNone);
+static constexpr SReg es(SReg::kIdEs);
+static constexpr SReg cs(SReg::kIdCs);
+static constexpr SReg ss(SReg::kIdSs);
+static constexpr SReg ds(SReg::kIdDs);
+static constexpr SReg fs(SReg::kIdFs);
+static constexpr SReg gs(SReg::kIdGs);
+
+static constexpr CReg cr0(0);
+static constexpr CReg cr1(1);
+static constexpr CReg cr2(2);
+static constexpr CReg cr3(3);
+static constexpr CReg cr4(4);
+static constexpr CReg cr5(5);
+static constexpr CReg cr6(6);
+static constexpr CReg cr7(7);
+static constexpr CReg cr8(8);
+static constexpr CReg cr9(9);
+static constexpr CReg cr10(10);
+static constexpr CReg cr11(11);
+static constexpr CReg cr12(12);
+static constexpr CReg cr13(13);
+static constexpr CReg cr14(14);
+static constexpr CReg cr15(15);
+
+static constexpr DReg dr0(0);
+static constexpr DReg dr1(1);
+static constexpr DReg dr2(2);
+static constexpr DReg dr3(3);
+static constexpr DReg dr4(4);
+static constexpr DReg dr5(5);
+static constexpr DReg dr6(6);
+static constexpr DReg dr7(7);
+static constexpr DReg dr8(8);
+static constexpr DReg dr9(9);
+static constexpr DReg dr10(10);
+static constexpr DReg dr11(11);
+static constexpr DReg dr12(12);
+static constexpr DReg dr13(13);
+static constexpr DReg dr14(14);
+static constexpr DReg dr15(15);
+
+static constexpr St st0(0);
+static constexpr St st1(1);
+static constexpr St st2(2);
+static constexpr St st3(3);
+static constexpr St st4(4);
+static constexpr St st5(5);
+static constexpr St st6(6);
+static constexpr St st7(7);
+
+static constexpr Bnd bnd0(0);
+static constexpr Bnd bnd1(1);
+static constexpr Bnd bnd2(2);
+static constexpr Bnd bnd3(3);
+
+static constexpr Rip rip(0);
+
+} // {regs}
+
+// Make `x86::regs` accessible through `x86` namespace as well.
+using namespace regs;
+
+// ============================================================================
+// [asmjit::x86::ptr]
+// ============================================================================
+
+//! Creates `[base.reg + offset]` memory operand.
+static constexpr Mem ptr(const Gp& base, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, offset, size);
+}
+//! Creates `[base.reg + (index << shift) + offset]` memory operand (scalar index).
+static constexpr Mem ptr(const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, offset, size);
+}
+//! Creates `[base.reg + (index << shift) + offset]` memory operand (vector index).
+static constexpr Mem ptr(const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, offset, size);
+}
+
+//! Creates `[base + offset]` memory operand.
+static constexpr Mem ptr(const Label& base, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, offset, size);
+}
+//! Creates `[base + (index << shift) + offset]` memory operand.
+static constexpr Mem ptr(const Label& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, offset, size);
+}
+//! Creates `[base + (index << shift) + offset]` memory operand.
+static constexpr Mem ptr(const Label& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, offset, size);
+}
+
+//! Creates `[rip + offset]` memory operand.
+static constexpr Mem ptr(const Rip& rip_, int32_t offset = 0, uint32_t size = 0) noexcept {
+ return Mem(rip_, offset, size);
+}
+
+//! Creates `[base]` absolute memory operand.
+static constexpr Mem ptr(uint64_t base, uint32_t size = 0) noexcept {
+ return Mem(base, size);
+}
+//! Creates `[base + (index.reg << shift)]` absolute memory operand.
+static constexpr Mem ptr(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size);
+}
+//! Creates `[base + (index.reg << shift)]` absolute memory operand.
+static constexpr Mem ptr(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size);
+}
+
+//! Creates `[base]` absolute memory operand (absolute).
+static constexpr Mem ptr_abs(uint64_t base, uint32_t size = 0) noexcept {
+ return Mem(base, size, BaseMem::kSignatureMemAbs);
+}
+//! Creates `[base + (index.reg << shift)]` absolute memory operand (absolute).
+static constexpr Mem ptr_abs(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size, BaseMem::kSignatureMemAbs);
+}
+//! Creates `[base + (index.reg << shift)]` absolute memory operand (absolute).
+static constexpr Mem ptr_abs(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size, BaseMem::kSignatureMemAbs);
+}
+
+//! Creates `[base]` relative memory operand (relative).
+static constexpr Mem ptr_rel(uint64_t base, uint32_t size = 0) noexcept {
+ return Mem(base, size, BaseMem::kSignatureMemRel);
+}
+//! Creates `[base + (index.reg << shift)]` relative memory operand (relative).
+static constexpr Mem ptr_rel(uint64_t base, const Reg& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size, BaseMem::kSignatureMemRel);
+}
+//! Creates `[base + (index.reg << shift)]` relative memory operand (relative).
+static constexpr Mem ptr_rel(uint64_t base, const Vec& index, uint32_t shift = 0, uint32_t size = 0) noexcept {
+ return Mem(base, index, shift, size, BaseMem::kSignatureMemRel);
+}
+
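+// Illustrative sketch (not upstream) of the builders above:
+//
+//   Mem m1 = ptr(rbp, -8);          // [rbp - 8]
+//   Mem m2 = ptr(rax, rcx, 3, 16);  // [rax + rcx * 8 + 16]
+//   Mem m3 = ptr(uint64_t(0x1000)); // [0x1000] - abs/rel left to the emitter,
+//                                   // ptr_abs()/ptr_rel() force it.
+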
+#define ASMJIT_MEM_PTR(FUNC, SIZE) \
+ /*! Creates `[base + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Gp& base, int32_t offset = 0) noexcept { \
+ return Mem(base, offset, SIZE); \
+ } \
+ /*! Creates `[base + (index << shift) + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Gp& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0) noexcept { \
+ return Mem(base, index, shift, offset, SIZE); \
+ } \
+ /*! Creates `[base + (vec_index << shift) + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Gp& base, const Vec& index, uint32_t shift = 0, int32_t offset = 0) noexcept { \
+ return Mem(base, index, shift, offset, SIZE); \
+ } \
+ /*! Creates `[base + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Label& base, int32_t offset = 0) noexcept { \
+ return Mem(base, offset, SIZE); \
+ } \
+ /*! Creates `[base + (index << shift) + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Label& base, const Gp& index, uint32_t shift = 0, int32_t offset = 0) noexcept { \
+ return Mem(base, index, shift, offset, SIZE); \
+ } \
+ /*! Creates `[rip + offset]` memory operand. */ \
+ static constexpr Mem FUNC(const Rip& rip_, int32_t offset = 0) noexcept { \
+ return Mem(rip_, offset, SIZE); \
+ } \
+ /*! Creates `[ptr]` memory operand. */ \
+ static constexpr Mem FUNC(uint64_t base) noexcept { \
+ return Mem(base, SIZE); \
+ } \
+ /*! Creates `[base + (index << shift) + offset]` memory operand. */ \
+ static constexpr Mem FUNC(uint64_t base, const Gp& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE); \
+ } \
+ /*! Creates `[base + (vec_index << shift) + offset]` memory operand. */ \
+ static constexpr Mem FUNC(uint64_t base, const Vec& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE); \
+ } \
+ \
+ /*! Creates `[base + offset]` memory operand (absolute). */ \
+ static constexpr Mem FUNC##_abs(uint64_t base) noexcept { \
+ return Mem(base, SIZE, BaseMem::kSignatureMemAbs); \
+ } \
+ /*! Creates `[base + (index << shift) + offset]` memory operand (absolute). */ \
+ static constexpr Mem FUNC##_abs(uint64_t base, const Gp& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE, BaseMem::kSignatureMemAbs); \
+ } \
+ /*! Creates `[base + (vec_index << shift) + offset]` memory operand (absolute). */ \
+ static constexpr Mem FUNC##_abs(uint64_t base, const Vec& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE, BaseMem::kSignatureMemAbs); \
+ } \
+ \
+ /*! Creates `[base + offset]` memory operand (relative). */ \
+ static constexpr Mem FUNC##_rel(uint64_t base) noexcept { \
+ return Mem(base, SIZE, BaseMem::kSignatureMemRel); \
+ } \
+ /*! Creates `[base + (index << shift) + offset]` memory operand (relative). */ \
+ static constexpr Mem FUNC##_rel(uint64_t base, const Gp& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE, BaseMem::kSignatureMemRel); \
+ } \
+ /*! Creates `[base + (vec_index << shift) + offset]` memory operand (relative). */ \
+ static constexpr Mem FUNC##_rel(uint64_t base, const Vec& index, uint32_t shift = 0) noexcept { \
+ return Mem(base, index, shift, SIZE, BaseMem::kSignatureMemRel); \
+ }
+
+// Definition of memory operand constructors that use platform independent naming.
+ASMJIT_MEM_PTR(ptr_8, 1)
+ASMJIT_MEM_PTR(ptr_16, 2)
+ASMJIT_MEM_PTR(ptr_32, 4)
+ASMJIT_MEM_PTR(ptr_48, 6)
+ASMJIT_MEM_PTR(ptr_64, 8)
+ASMJIT_MEM_PTR(ptr_80, 10)
+ASMJIT_MEM_PTR(ptr_128, 16)
+ASMJIT_MEM_PTR(ptr_256, 32)
+ASMJIT_MEM_PTR(ptr_512, 64)
+
+// Definition of memory operand constructors that use X86-specific convention.
+ASMJIT_MEM_PTR(byte_ptr, 1)
+ASMJIT_MEM_PTR(word_ptr, 2)
+ASMJIT_MEM_PTR(dword_ptr, 4)
+ASMJIT_MEM_PTR(qword_ptr, 8)
+ASMJIT_MEM_PTR(tword_ptr, 10)
+ASMJIT_MEM_PTR(oword_ptr, 16)
+ASMJIT_MEM_PTR(dqword_ptr, 16)
+ASMJIT_MEM_PTR(qqword_ptr, 32)
+ASMJIT_MEM_PTR(xmmword_ptr, 16)
+ASMJIT_MEM_PTR(ymmword_ptr, 32)
+ASMJIT_MEM_PTR(zmmword_ptr, 64)
+
+#undef ASMJIT_MEM_PTR
+
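+// Illustrative sketch (not upstream): sized pointers disambiguate instructions
+// whose memory operand size cannot be inferred, e.g. with a hypothetical
+// x86::Assembler instance `a`:
+//
+//   a.inc(dword_ptr(rax));      // inc dword ptr [rax]
+//   a.mov(byte_ptr(rbx, 1), 7); // mov byte ptr [rbx + 1], 7
+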
+//! \}
+
+ASMJIT_END_SUB_NAMESPACE
+
+// ============================================================================
+// [asmjit::Type::IdOfT<x86::Reg>]
+// ============================================================================
+
+//! \cond INTERNAL
+
+ASMJIT_BEGIN_NAMESPACE
+ASMJIT_DEFINE_TYPE_ID(x86::Gpb, kIdI8);
+ASMJIT_DEFINE_TYPE_ID(x86::Gpw, kIdI16);
+ASMJIT_DEFINE_TYPE_ID(x86::Gpd, kIdI32);
+ASMJIT_DEFINE_TYPE_ID(x86::Gpq, kIdI64);
+ASMJIT_DEFINE_TYPE_ID(x86::Mm , kIdMmx64);
+ASMJIT_DEFINE_TYPE_ID(x86::Xmm, kIdI32x4);
+ASMJIT_DEFINE_TYPE_ID(x86::Ymm, kIdI32x8);
+ASMJIT_DEFINE_TYPE_ID(x86::Zmm, kIdI32x16);
+ASMJIT_END_NAMESPACE
+
+//! \endcond
+
+#endif // ASMJIT_X86_X86OPERAND_H_INCLUDED
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp b/3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp
new file mode 100644
index 00000000000..cd6ebb5d6d5
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86rapass.cpp
@@ -0,0 +1,1172 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#include "../core/api-build_p.h"
+#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_NO_COMPILER)
+
+#include "../core/cpuinfo.h"
+#include "../core/support.h"
+#include "../core/type.h"
+#include "../x86/x86assembler.h"
+#include "../x86/x86compiler.h"
+#include "../x86/x86instapi_p.h"
+#include "../x86/x86instdb_p.h"
+#include "../x86/x86internal_p.h"
+#include "../x86/x86rapass_p.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+// ============================================================================
+// [asmjit::x86::X86RAPass - Helpers]
+// ============================================================================
+
+static ASMJIT_INLINE uint64_t raImmMaskFromSize(uint32_t size) noexcept {
+ ASMJIT_ASSERT(size > 0 && size < 256);
+ static const uint64_t masks[] = {
+ 0x00000000000000FFu, // 1
+ 0x000000000000FFFFu, // 2
+ 0x00000000FFFFFFFFu, // 4
+ 0xFFFFFFFFFFFFFFFFu, // 8
+ 0x0000000000000000u, // 16
+ 0x0000000000000000u, // 32
+ 0x0000000000000000u, // 64
+ 0x0000000000000000u, // 128
+ 0x0000000000000000u // 256
+ };
+ return masks[Support::ctz(size)];
+}
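
Worked values for the table above (illustration only): the index is
Support::ctz(size), so power-of-two sizes map to consecutive slots, and
anything wider than 8 bytes deliberately yields an empty mask because
immediates wider than 64 bits do not exist on X86.

    // ctz(1) == 0, ctz(2) == 1, ctz(4) == 2, ctz(8) == 3, ctz(16) == 4.
    // raImmMaskFromSize(1)  -> 0x00000000000000FFu
    // raImmMaskFromSize(4)  -> 0x00000000FFFFFFFFu
    // raImmMaskFromSize(8)  -> 0xFFFFFFFFFFFFFFFFu
    // raImmMaskFromSize(16) -> 0x0000000000000000u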
+
+static ASMJIT_INLINE uint32_t raUseOutFlagsFromRWFlags(uint32_t rwFlags) noexcept {
+ static const uint32_t map[] = {
+ 0,
+ RATiedReg::kRead | RATiedReg::kUse, // kRead
+ RATiedReg::kWrite | RATiedReg::kOut, // kWrite
+ RATiedReg::kRW | RATiedReg::kUse, // kRW
+ 0,
+ RATiedReg::kRead | RATiedReg::kUse | RATiedReg::kUseRM, // kRead | kRegMem
+ RATiedReg::kWrite | RATiedReg::kOut | RATiedReg::kOutRM, // kWrite | kRegMem
+ RATiedReg::kRW | RATiedReg::kUse | RATiedReg::kUseRM // kRW | kRegMem
+ };
+
+ return map[rwFlags & (OpRWInfo::kRW | OpRWInfo::kRegMem)];
+}
+
+static ASMJIT_INLINE uint32_t raRegRwFlags(uint32_t flags) noexcept {
+ return raUseOutFlagsFromRWFlags(flags);
+}
+
+static ASMJIT_INLINE uint32_t raMemBaseRwFlags(uint32_t flags) noexcept {
+ constexpr uint32_t shift = Support::constCtz(OpRWInfo::kMemBaseRW);
+ return raUseOutFlagsFromRWFlags((flags >> shift) & OpRWInfo::kRW);
+}
+
+static ASMJIT_INLINE uint32_t raMemIndexRwFlags(uint32_t flags) noexcept {
+ constexpr uint32_t shift = Support::constCtz(OpRWInfo::kMemIndexRW);
+ return raUseOutFlagsFromRWFlags((flags >> shift) & OpRWInfo::kRW);
+}
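
The two memory wrappers above rely on OpRWInfo packing additional copies of
the R|W bit pair at fixed offsets; Support::constCtz recovers the offset at
compile time. A minimal sketch of the pattern, using hypothetical constants
(the real values are defined by OpRWInfo and are not reproduced here):

    // Hypothetical layout for illustration only.
    constexpr uint32_t kRW        = 0x3u;      // canonical read|write pair
    constexpr uint32_t kMemBaseRW = kRW << 8;  // same pair at a fixed offset

    inline uint32_t extractMemBaseRw(uint32_t opFlags) noexcept {
      constexpr uint32_t shift = Support::constCtz(kMemBaseRW); // == 8
      return (opFlags >> shift) & kRW;         // back to the canonical encoding
    }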
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder]
+// ============================================================================
+
+class X86RACFGBuilder : public RACFGBuilder<X86RACFGBuilder> {
+public:
+ uint32_t _archId;
+ bool _is64Bit;
+ bool _avxEnabled;
+
+ inline X86RACFGBuilder(X86RAPass* pass) noexcept
+ : RACFGBuilder<X86RACFGBuilder>(pass),
+ _archId(pass->cc()->archId()),
+ _is64Bit(pass->gpSize() == 8),
+ _avxEnabled(pass->_avxEnabled) {
+ }
+
+ inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cc); }
+
+ inline uint32_t choose(uint32_t sseInst, uint32_t avxInst) const noexcept {
+ return _avxEnabled ? avxInst : sseInst;
+ }
+
+ Error onInst(InstNode* inst, uint32_t& controlType, RAInstBuilder& ib) noexcept;
+
+ Error onBeforeCall(FuncCallNode* call) noexcept;
+ Error onCall(FuncCallNode* call, RAInstBuilder& ib) noexcept;
+
+ Error moveImmToRegArg(FuncCallNode* call, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept;
+ Error moveImmToStackArg(FuncCallNode* call, const FuncValue& arg, const Imm& imm_) noexcept;
+ Error moveRegToStackArg(FuncCallNode* call, const FuncValue& arg, const BaseReg& reg) noexcept;
+
+ Error onBeforeRet(FuncRetNode* funcRet) noexcept;
+ Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept;
+};
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - OnInst]
+// ============================================================================
+
+Error X86RACFGBuilder::onInst(InstNode* inst, uint32_t& controlType, RAInstBuilder& ib) noexcept {
+ InstRWInfo rwInfo;
+
+ uint32_t instId = inst->id();
+ if (Inst::isDefinedId(instId)) {
+ uint32_t opCount = inst->opCount();
+ const Operand* opArray = inst->operands();
+ ASMJIT_PROPAGATE(InstInternal::queryRWInfo(_archId, inst->baseInst(), opArray, opCount, rwInfo));
+
+ const InstDB::InstInfo& instInfo = InstDB::infoById(instId);
+ bool hasGpbHiConstraint = false;
+ uint32_t singleRegOps = 0;
+
+ if (opCount) {
+ for (uint32_t i = 0; i < opCount; i++) {
+ const Operand& op = opArray[i];
+ const OpRWInfo& opRwInfo = rwInfo.operand(i);
+
+ if (op.isReg()) {
+ // Register Operand
+ // ----------------
+ const Reg& reg = op.as<Reg>();
+
+ uint32_t flags = raRegRwFlags(opRwInfo.opFlags());
+ uint32_t allowedRegs = 0xFFFFFFFFu;
+
+ // X86-specific constraints related to LO|HI general purpose registers.
+ // This is only required when the register is part of the encoding. If
+ // the register is fixed we won't restrict anything as it doesn't restrict
+ // encoding of other registers.
+ if (reg.isGpb() && !(opRwInfo.opFlags() & OpRWInfo::kRegPhysId)) {
+ flags |= RATiedReg::kX86Gpb;
+ if (!_is64Bit) {
+ // Restrict to the first four register ids - AL|AH|BL|BH|CL|CH|DL|DH.
+ // In 32-bit mode it's not possible to access SIL|DIL, etc., so this is enough.
+ allowedRegs = 0x0Fu;
+ }
+ else {
+ // If we encountered a GPB-HI register the situation is much more
+ // complicated than in 32-bit mode. We need to patch all registers
+ // to not use an id higher than 7, and all GPB-LO registers to not
+ // use an index higher than 3. Instead of doing the patching here
+ // we just set a flag and do it later, to keep this loop simple.
+ if (reg.isGpbHi()) {
+ hasGpbHiConstraint = true;
+ allowedRegs = 0x0Fu;
+ }
+ }
+ }
+
+ uint32_t vIndex = Operand::virtIdToIndex(reg.id());
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ // Use RW instead of Write if not the whole register is overwritten.
+ // This is important for liveness analysis as we cannot kill a register
+ // that will still be used. For example `mov al, 0xFF` is not a write-only
+ // operation if the user allocated the whole `rax` register.
+ if ((flags & RATiedReg::kRW) == RATiedReg::kWrite) {
+ if (workReg->regByteMask() & ~(opRwInfo.writeByteMask() | opRwInfo.extendByteMask())) {
+ // Not write-only operation.
+ flags = (flags & ~RATiedReg::kOut) | (RATiedReg::kRead | RATiedReg::kUse);
+ }
+ }
+
+ // Do not use the RegMem flag if changing Reg to Mem requires an
+ // additional CPU feature that may not be enabled.
+ if (rwInfo.rmFeature() && (flags & (RATiedReg::kUseRM | RATiedReg::kOutRM))) {
+ flags &= ~(RATiedReg::kUseRM | RATiedReg::kOutRM);
+ }
+
+ uint32_t group = workReg->group();
+ uint32_t allocable = _pass->_availableRegs[group] & allowedRegs;
+
+ uint32_t useId = BaseReg::kIdBad;
+ uint32_t outId = BaseReg::kIdBad;
+
+ uint32_t useRewriteMask = 0;
+ uint32_t outRewriteMask = 0;
+
+ if (flags & RATiedReg::kUse) {
+ useRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
+ if (opRwInfo.opFlags() & OpRWInfo::kRegPhysId) {
+ useId = opRwInfo.physId();
+ flags |= RATiedReg::kUseFixed;
+ }
+ }
+ else {
+ outRewriteMask = Support::bitMask(inst->getRewriteIndex(&reg._baseId));
+ if (opRwInfo.opFlags() & OpRWInfo::kRegPhysId) {
+ outId = opRwInfo.physId();
+ flags |= RATiedReg::kOutFixed;
+ }
+ }
+
+ ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, outId, outRewriteMask, opRwInfo.rmSize()));
+ if (singleRegOps == i)
+ singleRegOps++;
+ }
+ }
+ else if (op.isMem()) {
+ // Memory Operand
+ // --------------
+ const Mem& mem = op.as<Mem>();
+ ib.addForbiddenFlags(RATiedReg::kUseRM | RATiedReg::kOutRM);
+
+ if (mem.isRegHome()) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(mem.baseId()), &workReg));
+ _pass->getOrCreateStackSlot(workReg);
+ }
+ else if (mem.hasBaseReg()) {
+ uint32_t vIndex = Operand::virtIdToIndex(mem.baseId());
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ uint32_t flags = raMemBaseRwFlags(opRwInfo.opFlags());
+ uint32_t group = workReg->group();
+ uint32_t allocable = _pass->_availableRegs[group];
+
+ uint32_t useId = BaseReg::kIdBad;
+ uint32_t outId = BaseReg::kIdBad;
+
+ uint32_t useRewriteMask = 0;
+ uint32_t outRewriteMask = 0;
+
+ if (flags & RATiedReg::kUse) {
+ useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
+ if (opRwInfo.opFlags() & OpRWInfo::kMemPhysId) {
+ useId = opRwInfo.physId();
+ flags |= RATiedReg::kUseFixed;
+ }
+ }
+ else {
+ outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._baseId));
+ if (opRwInfo.opFlags() & OpRWInfo::kMemPhysId) {
+ outId = opRwInfo.physId();
+ flags |= RATiedReg::kOutFixed;
+ }
+ }
+
+ ASMJIT_PROPAGATE(ib.add(workReg, flags, allocable, useId, useRewriteMask, outId, outRewriteMask));
+ }
+ }
+
+ if (mem.hasIndexReg()) {
+ uint32_t vIndex = Operand::virtIdToIndex(mem.indexId());
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ uint32_t flags = raMemIndexRwFlags(opRwInfo.opFlags());
+ uint32_t group = workReg->group();
+ uint32_t allocable = _pass->_availableRegs[group];
+
+ // Index registers never have a fixed id on X86/X64.
+ const uint32_t useId = BaseReg::kIdBad;
+ const uint32_t outId = BaseReg::kIdBad;
+
+ uint32_t useRewriteMask = 0;
+ uint32_t outRewriteMask = 0;
+
+ if (flags & RATiedReg::kUse)
+ useRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
+ else
+ outRewriteMask = Support::bitMask(inst->getRewriteIndex(&mem._data[Operand::kDataMemIndexId]));
+
+ ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead, allocable, useId, useRewriteMask, outId, outRewriteMask));
+ }
+ }
+ }
+ }
+ }
+
+ // Handle extra operand (either REP {cx|ecx|rcx} or AVX-512 {k} selector).
+ if (inst->hasExtraReg()) {
+ uint32_t vIndex = Operand::virtIdToIndex(inst->extraReg().id());
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ uint32_t group = workReg->group();
+ uint32_t rewriteMask = Support::bitMask(inst->getRewriteIndex(&inst->extraReg()._id));
+
+ if (group == Gp::kGroupKReg) {
+ // AVX-512 mask selector {k} register - read-only, allocable to any register except {k0}.
+ uint32_t allocableRegs = _pass->_availableRegs[group] & ~Support::bitMask(0);
+ ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead, allocableRegs, BaseReg::kIdBad, rewriteMask, BaseReg::kIdBad, 0));
+ singleRegOps = 0;
+ }
+ else {
+ // REP {cx|ecx|rcx} register - read & write, allocable to {cx|ecx|rcx} only.
+ ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRW, 0, Gp::kIdCx, rewriteMask, Gp::kIdBad, 0));
+ }
+ }
+ else {
+ uint32_t group = inst->extraReg().group();
+ if (group == Gp::kGroupKReg && inst->extraReg().id() != 0)
+ singleRegOps = 0;
+ }
+ }
+
+ // Handle X86 constraints.
+ if (hasGpbHiConstraint) {
+ for (RATiedReg& tiedReg : ib) {
+ tiedReg._allocableRegs &= tiedReg.hasFlag(RATiedReg::kX86Gpb) ? 0x0Fu : 0xFFu;
+ }
+ }
+
+ if (ib.tiedRegCount() == 1) {
+ // Handle special cases of some instructions where all operands share the
+ // same register. In such a case the single operand becomes read-only or
+ // write-only.
+ uint32_t singleRegCase = InstDB::kSingleRegNone;
+ if (singleRegOps == opCount) {
+ singleRegCase = instInfo.singleRegCase();
+ }
+ else if (opCount == 2 && inst->opType(1).isImm()) {
+ // Handle some tricks used by X86 asm.
+ const BaseReg& reg = inst->opType(0).as<BaseReg>();
+ const Imm& imm = inst->opType(1).as<Imm>();
+
+ const RAWorkReg* workReg = _pass->workRegById(ib[0]->workId());
+ uint32_t workRegSize = workReg->info().size();
+
+ switch (inst->id()) {
+ case Inst::kIdOr: {
+ // Sets the value of the destination register to -1, previous content unused.
+ if (reg.size() >= 4 || reg.size() >= workRegSize) {
+ if (imm.i64() == -1 || imm.u64() == raImmMaskFromSize(reg.size()))
+ singleRegCase = InstDB::kSingleRegWO;
+ }
+ ASMJIT_FALLTHROUGH;
+ }
+
+ case Inst::kIdAdd:
+ case Inst::kIdAnd:
+ case Inst::kIdRol:
+ case Inst::kIdRor:
+ case Inst::kIdSar:
+ case Inst::kIdShl:
+ case Inst::kIdShr:
+ case Inst::kIdSub:
+ case Inst::kIdXor: {
+ // Updates [E|R]FLAGS without changing the content.
+ if (reg.size() != 4 || reg.size() >= workRegSize) {
+ if (imm.u64() == 0)
+ singleRegCase = InstDB::kSingleRegRO;
+ }
+ break;
+ }
+ }
+ }
+
+ switch (singleRegCase) {
+ case InstDB::kSingleRegNone:
+ break;
+ case InstDB::kSingleRegRO:
+ ib[0]->makeReadOnly();
+ break;
+ case InstDB::kSingleRegWO:
+ ib[0]->makeWriteOnly();
+ break;
+ }
+ }
+
+ controlType = instInfo.controlType();
+ }
+
+ return kErrorOk;
+}
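
For reference, the immediate-operand idioms recognized by the switch above
(illustration only, nothing here is emitted by this pass):

    // or  eax, -1   ; destination becomes all ones regardless of its old
    //               ; value, so the tied register can be made write-only.
    // add eax, 0    ; destination is unchanged and only [E|R]FLAGS is
    //               ; updated, so the tied register can be made read-only.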
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - OnCall]
+// ============================================================================
+
+Error X86RACFGBuilder::onBeforeCall(FuncCallNode* call) noexcept {
+ uint32_t argCount = call->argCount();
+ uint32_t retCount = call->retCount();
+ const FuncDetail& fd = call->detail();
+
+ cc()->_setCursor(call->prev());
+
+ for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+ for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
+ if (!fd.hasArg(argIndex + argHi))
+ continue;
+
+ const FuncValue& arg = fd.arg(argIndex + argHi);
+ const Operand& op = call->arg(argIndex + argHi);
+
+ if (op.isNone())
+ continue;
+
+ if (op.isReg()) {
+ const Reg& reg = op.as<Reg>();
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
+
+ if (arg.isReg()) {
+ uint32_t regGroup = workReg->group();
+ uint32_t argGroup = Reg::groupOf(arg.regType());
+
+ if (regGroup != argGroup) {
+ // TODO:
+ ASMJIT_ASSERT(false);
+ }
+ }
+ else {
+ ASMJIT_PROPAGATE(moveRegToStackArg(call, arg, op.as<BaseReg>()));
+ }
+ }
+ else if (op.isImm()) {
+ if (arg.isReg()) {
+ BaseReg reg;
+ ASMJIT_PROPAGATE(moveImmToRegArg(call, arg, op.as<Imm>(), &reg));
+ call->_args[argIndex + argHi] = reg;
+ }
+ else {
+ ASMJIT_PROPAGATE(moveImmToStackArg(call, arg, op.as<Imm>()));
+ }
+ }
+ }
+ }
+
+ cc()->_setCursor(call);
+ if (fd.hasFlag(CallConv::kFlagCalleePopsStack))
+ ASMJIT_PROPAGATE(cc()->sub(cc()->zsp(), fd.argStackSize()));
+
+ for (uint32_t retIndex = 0; retIndex < retCount; retIndex++) {
+ const FuncValue& ret = fd.ret(retIndex);
+ const Operand& op = call->ret(retIndex);
+
+ if (op.isReg()) {
+ const Reg& reg = op.as<Reg>();
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
+
+ if (ret.isReg()) {
+ if (ret.regType() == Reg::kTypeSt) {
+ if (workReg->group() != Reg::kGroupVec)
+ return DebugUtils::errored(kErrorInvalidAssignment);
+
+ Reg dst = Reg(workReg->signature(), workReg->virtId());
+ Mem mem;
+
+ uint32_t typeId = Type::baseOf(workReg->typeId());
+ if (ret.hasTypeId())
+ typeId = ret.typeId();
+
+ switch (typeId) {
+ case Type::kIdF32:
+ ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 4, 4));
+ mem.setSize(4);
+ ASMJIT_PROPAGATE(cc()->fstp(mem));
+ ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovss, Inst::kIdVmovss), dst.as<Xmm>(), mem));
+ break;
+
+ case Type::kIdF64:
+ ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 8, 4));
+ mem.setSize(8);
+ ASMJIT_PROPAGATE(cc()->fstp(mem));
+ ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovsd, Inst::kIdVmovsd), dst.as<Xmm>(), mem));
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ }
+ else {
+ uint32_t regGroup = workReg->group();
+ uint32_t retGroup = Reg::groupOf(ret.regType());
+
+ if (regGroup != retGroup) {
+ // TODO:
+ ASMJIT_ASSERT(false);
+ }
+ }
+ }
+ }
+ }
+
+ // This block has function call(s).
+ _curBlock->addFlags(RABlock::kFlagHasFuncCalls);
+ _pass->func()->frame().addAttributes(FuncFrame::kAttrHasFuncCalls);
+ _pass->func()->frame().updateCallStackSize(fd.argStackSize());
+
+ return kErrorOk;
+}
+
+Error X86RACFGBuilder::onCall(FuncCallNode* call, RAInstBuilder& ib) noexcept {
+ uint32_t argCount = call->argCount();
+ uint32_t retCount = call->retCount();
+ const FuncDetail& fd = call->detail();
+
+ for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+ for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
+ if (!fd.hasArg(argIndex + argHi))
+ continue;
+
+ const FuncValue& arg = fd.arg(argIndex + argHi);
+ const Operand& op = call->arg(argIndex + argHi);
+
+ if (op.isNone())
+ continue;
+
+ if (op.isReg()) {
+ const Reg& reg = op.as<Reg>();
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
+
+ if (arg.isReg()) {
+ uint32_t regGroup = workReg->group();
+ uint32_t argGroup = Reg::groupOf(arg.regType());
+
+ if (regGroup == argGroup) {
+ ASMJIT_PROPAGATE(ib.addCallArg(workReg, arg.regId()));
+ }
+ }
+ }
+ }
+ }
+
+ for (uint32_t retIndex = 0; retIndex < retCount; retIndex++) {
+ const FuncValue& ret = fd.ret(retIndex);
+ const Operand& op = call->ret(retIndex);
+
+ // Not handled here...
+ if (ret.regType() == Reg::kTypeSt)
+ continue;
+
+ if (op.isReg()) {
+ const Reg& reg = op.as<Reg>();
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
+
+ if (ret.isReg()) {
+ uint32_t regGroup = workReg->group();
+ uint32_t retGroup = Reg::groupOf(ret.regType());
+
+ if (regGroup == retGroup) {
+ ASMJIT_PROPAGATE(ib.addCallRet(workReg, ret.regId()));
+ }
+ }
+ else {
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ }
+ }
+
+ // Setup clobbered registers.
+ ib._clobbered[0] = Support::lsbMask<uint32_t>(_pass->_physRegCount[0]) & ~fd.preservedRegs(0);
+ ib._clobbered[1] = Support::lsbMask<uint32_t>(_pass->_physRegCount[1]) & ~fd.preservedRegs(1);
+ ib._clobbered[2] = Support::lsbMask<uint32_t>(_pass->_physRegCount[2]) & ~fd.preservedRegs(2);
+ ib._clobbered[3] = Support::lsbMask<uint32_t>(_pass->_physRegCount[3]) & ~fd.preservedRegs(3);
+
+ return kErrorOk;
+}
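
A worked example of the clobbered-set computation, with assumed numbers:
given 16 physical GP registers and a calling convention whose preserved-GP
mask is 0x0000F038 (a hypothetical RBX|RSP|RBP|R12..R15 set):

    // Support::lsbMask<uint32_t>(16)  == 0x0000FFFF  (all 16 GPs)
    // fd.preservedRegs(0)             == 0x0000F038  (assumed)
    // ib._clobbered[0]                == 0x0000FFFF & ~0x0000F038
    //                                 == 0x00000FC7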
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - MoveImmToRegArg]
+// ============================================================================
+
+Error X86RACFGBuilder::moveImmToRegArg(FuncCallNode* call, const FuncValue& arg, const Imm& imm_, BaseReg* out) noexcept {
+ DebugUtils::unused(call);
+ ASMJIT_ASSERT(arg.isReg());
+
+ Imm imm(imm_);
+ uint32_t rTypeId = Type::kIdU32;
+
+ switch (arg.typeId()) {
+ case Type::kIdI8: imm.signExtend8Bits(); goto MovU32;
+ case Type::kIdU8: imm.zeroExtend8Bits(); goto MovU32;
+ case Type::kIdI16: imm.signExtend16Bits(); goto MovU32;
+ case Type::kIdU16: imm.zeroExtend16Bits(); goto MovU32;
+
+ case Type::kIdI32:
+ case Type::kIdU32:
+MovU32:
+ imm.zeroExtend32Bits();
+ break;
+
+ case Type::kIdI64:
+ case Type::kIdU64:
+ // Moving to GPD automatically zero-extends in 64-bit mode.
+ if (imm.isUInt32()) {
+ imm.zeroExtend32Bits();
+ break;
+ }
+
+ rTypeId = Type::kIdU64;
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ ASMJIT_PROPAGATE(cc()->_newReg(*out, rTypeId, nullptr));
+ cc()->virtRegById(out->id())->setWeight(RAPass::kCallArgWeight);
+
+ return cc()->mov(out->as<x86::Gp>(), imm);
+}
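
The 64-bit branch above exploits the implicit zero-extension of 32-bit
register writes on x86-64, e.g. (illustration only):

    // mov ecx, 0xFFFFFFFF   ; also clears bits 63..32 of rcx, so a u64
    //                       ; that fits into 32 bits can use the shorter
    //                       ; 32-bit encoding (rTypeId stays kIdU32).
    // mov rcx, 0x100000000  ; values that do not fit require the 64-bit
    //                       ; form (rTypeId becomes kIdU64).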
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - MoveImmToStackArg]
+// ============================================================================
+
+Error X86RACFGBuilder::moveImmToStackArg(FuncCallNode* call, const FuncValue& arg, const Imm& imm_) noexcept {
+ DebugUtils::unused(call);
+ ASMJIT_ASSERT(arg.isStack());
+
+ Mem mem = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
+ Imm imm[2];
+
+ mem.setSize(4);
+ imm[0] = imm_;
+ uint32_t nMovs = 0;
+
+ // One stack entry has the same size as a native register. That means
+ // that if we want to move a 32-bit integer onto the stack in 64-bit mode,
+ // we need to extend it to a 64-bit integer first. In 32-bit mode, storing
+ // a 64-bit value to the stack is done in two steps, by writing the low
+ // and high parts separately.
+ switch (arg.typeId()) {
+ case Type::kIdI8: imm[0].signExtend8Bits(); goto MovU32;
+ case Type::kIdU8: imm[0].zeroExtend8Bits(); goto MovU32;
+ case Type::kIdI16: imm[0].signExtend16Bits(); goto MovU32;
+ case Type::kIdU16: imm[0].zeroExtend16Bits(); goto MovU32;
+
+ case Type::kIdI32:
+ case Type::kIdU32:
+ case Type::kIdF32:
+MovU32:
+ imm[0].zeroExtend32Bits();
+ nMovs = 1;
+ break;
+
+ case Type::kIdI64:
+ case Type::kIdU64:
+ case Type::kIdF64:
+ case Type::kIdMmx32:
+ case Type::kIdMmx64:
+ if (_is64Bit && imm[0].isInt32()) {
+ mem.setSize(8);
+ nMovs = 1;
+ break;
+ }
+
+ imm[1].setU32(imm[0].u32Hi());
+ imm[0].zeroExtend32Bits();
+ nMovs = 2;
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidState);
+ }
+
+ for (uint32_t i = 0; i < nMovs; i++) {
+ ASMJIT_PROPAGATE(cc()->mov(mem, imm[i]));
+ mem.addOffsetLo32(int32_t(mem.size()));
+ }
+
+ return kErrorOk;
+}
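
For example (assumed immediate, illustration only), storing the 64-bit
immediate 0x1122334455667788 to a stack argument in 32-bit mode expands to
two 32-bit stores:

    // mov dword [esp+off+0], 0x55667788   ; imm[0], low half
    // mov dword [esp+off+4], 0x11223344   ; imm[1], high half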
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - MoveRegToStackArg]
+// ============================================================================
+
+Error X86RACFGBuilder::moveRegToStackArg(FuncCallNode* call, const FuncValue& arg, const BaseReg& reg) noexcept {
+ DebugUtils::unused(call);
+ ASMJIT_ASSERT(arg.isStack());
+
+ Mem mem = ptr(_pass->_sp.as<Gp>(), arg.stackOffset());
+ Reg r0, r1;
+
+ VirtReg* vr = cc()->virtRegById(reg.id());
+ uint32_t gpSize = cc()->gpSize();
+ uint32_t instId = 0;
+
+ uint32_t dstTypeId = arg.typeId();
+ uint32_t srcTypeId = vr->typeId();
+
+ switch (dstTypeId) {
+ case Type::kIdI64:
+ case Type::kIdU64:
+ // Extend BYTE->QWORD (GP).
+ if (Type::isGp8(srcTypeId)) {
+ r1.setRegT<Reg::kTypeGpbLo>(reg.id());
+
+ instId = (dstTypeId == Type::kIdI64 && srcTypeId == Type::kIdI8) ? Inst::kIdMovsx : Inst::kIdMovzx;
+ goto ExtendMovGpXQ;
+ }
+
+ // Extend WORD->QWORD (GP).
+ if (Type::isGp16(srcTypeId)) {
+ r1.setRegT<Reg::kTypeGpw>(reg.id());
+
+ instId = (dstTypeId == Type::kIdI64 && srcTypeId == Type::kIdI16) ? Inst::kIdMovsx : Inst::kIdMovzx;
+ goto ExtendMovGpXQ;
+ }
+
+ // Extend DWORD->QWORD (GP).
+ if (Type::isGp32(srcTypeId)) {
+ r1.setRegT<Reg::kTypeGpd>(reg.id());
+
+ instId = Inst::kIdMovsxd;
+ if (dstTypeId == Type::kIdI64 && srcTypeId == Type::kIdI32)
+ goto ExtendMovGpXQ;
+ else
+ goto ZeroExtendGpDQ;
+ }
+
+ // Move QWORD (GP).
+ if (Type::isGp64(srcTypeId)) goto MovGpQ;
+ if (Type::isMmx(srcTypeId)) goto MovMmQ;
+ if (Type::isVec(srcTypeId)) goto MovXmmQ;
+ break;
+
+ case Type::kIdI32:
+ case Type::kIdU32:
+ case Type::kIdI16:
+ case Type::kIdU16:
+ // DWORD <- WORD (Zero|Sign Extend).
+ if (Type::isGp16(srcTypeId)) {
+ bool isDstSigned = dstTypeId == Type::kIdI16 || dstTypeId == Type::kIdI32;
+ bool isSrcSigned = srcTypeId == Type::kIdI8 || srcTypeId == Type::kIdI16;
+
+ r1.setRegT<Reg::kTypeGpw>(reg.id());
+ instId = isDstSigned && isSrcSigned ? Inst::kIdMovsx : Inst::kIdMovzx;
+ goto ExtendMovGpD;
+ }
+
+ // DWORD <- BYTE (Zero|Sign Extend).
+ if (Type::isGp8(srcTypeId)) {
+ bool isDstSigned = dstTypeId == Type::kIdI16 || dstTypeId == Type::kIdI32;
+ bool isSrcSigned = srcTypeId == Type::kIdI8 || srcTypeId == Type::kIdI16;
+
+ r1.setRegT<Reg::kTypeGpbLo>(reg.id());
+ instId = isDstSigned && isSrcSigned ? Inst::kIdMovsx : Inst::kIdMovzx;
+ goto ExtendMovGpD;
+ }
+ ASMJIT_FALLTHROUGH;
+
+ case Type::kIdI8:
+ case Type::kIdU8:
+ if (Type::isInt(srcTypeId)) goto MovGpD;
+ if (Type::isMmx(srcTypeId)) goto MovMmD;
+ if (Type::isVec(srcTypeId)) goto MovXmmD;
+ break;
+
+ case Type::kIdMmx32:
+ case Type::kIdMmx64:
+ // Extend BYTE->QWORD (GP).
+ if (Type::isGp8(srcTypeId)) {
+ r1.setRegT<Reg::kTypeGpbLo>(reg.id());
+
+ instId = Inst::kIdMovzx;
+ goto ExtendMovGpXQ;
+ }
+
+ // Extend WORD->QWORD (GP).
+ if (Type::isGp16(srcTypeId)) {
+ r1.setRegT<Reg::kTypeGpw>(reg.id());
+
+ instId = Inst::kIdMovzx;
+ goto ExtendMovGpXQ;
+ }
+
+ if (Type::isGp32(srcTypeId)) goto ExtendMovGpDQ;
+ if (Type::isGp64(srcTypeId)) goto MovGpQ;
+ if (Type::isMmx(srcTypeId)) goto MovMmQ;
+ if (Type::isVec(srcTypeId)) goto MovXmmQ;
+ break;
+
+ case Type::kIdF32:
+ case Type::kIdF32x1:
+ if (Type::isVec(srcTypeId)) goto MovXmmD;
+ break;
+
+ case Type::kIdF64:
+ case Type::kIdF64x1:
+ if (Type::isVec(srcTypeId)) goto MovXmmQ;
+ break;
+
+ default:
+ // TODO: Vector types by stack.
+ break;
+ }
+ return DebugUtils::errored(kErrorInvalidState);
+
+ // Extend+Move Gp.
+ExtendMovGpD:
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeGpd>(reg.id());
+
+ ASMJIT_PROPAGATE(cc()->emit(instId, r0, r1));
+ ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, mem, r0));
+ return kErrorOk;
+
+ExtendMovGpXQ:
+ if (gpSize == 8) {
+ mem.setSize(8);
+ r0.setRegT<Reg::kTypeGpq>(reg.id());
+
+ ASMJIT_PROPAGATE(cc()->emit(instId, r0, r1));
+ ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, mem, r0));
+ }
+ else {
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeGpd>(reg.id());
+
+ ASMJIT_PROPAGATE(cc()->emit(instId, r0, r1));
+
+ExtendMovGpDQ:
+ ASMJIT_PROPAGATE(cc()->emit(Inst::kIdMov, mem, r0));
+ mem.addOffsetLo32(4);
+ ASMJIT_PROPAGATE(cc()->emit(Inst::kIdAnd, mem, 0));
+ }
+ return kErrorOk;
+
+ZeroExtendGpDQ:
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeGpd>(reg.id());
+ goto ExtendMovGpDQ;
+
+MovGpD:
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeGpd>(reg.id());
+ return cc()->emit(Inst::kIdMov, mem, r0);
+
+MovGpQ:
+ mem.setSize(8);
+ r0.setRegT<Reg::kTypeGpq>(reg.id());
+ return cc()->emit(Inst::kIdMov, mem, r0);
+
+MovMmD:
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeMm>(reg.id());
+ return cc()->emit(choose(Inst::kIdMovd, Inst::kIdVmovd), mem, r0);
+
+MovMmQ:
+ mem.setSize(8);
+ r0.setRegT<Reg::kTypeMm>(reg.id());
+ return cc()->emit(choose(Inst::kIdMovq, Inst::kIdVmovq), mem, r0);
+
+MovXmmD:
+ mem.setSize(4);
+ r0.setRegT<Reg::kTypeXmm>(reg.id());
+ return cc()->emit(choose(Inst::kIdMovss, Inst::kIdVmovss), mem, r0);
+
+MovXmmQ:
+ mem.setSize(8);
+ r0.setRegT<Reg::kTypeXmm>(reg.id());
+ return cc()->emit(choose(Inst::kIdMovlps, Inst::kIdVmovlps), mem, r0);
+}
+
+// ============================================================================
+// [asmjit::x86::X86RACFGBuilder - OnRet]
+// ============================================================================
+
+Error X86RACFGBuilder::onBeforeRet(FuncRetNode* funcRet) noexcept {
+ const FuncDetail& funcDetail = _pass->func()->detail();
+ const Operand* opArray = funcRet->operands();
+ uint32_t opCount = funcRet->opCount();
+
+ cc()->_setCursor(funcRet->prev());
+
+ for (uint32_t i = 0; i < opCount; i++) {
+ const Operand& op = opArray[i];
+ const FuncValue& ret = funcDetail.ret(i);
+
+ if (!op.isReg())
+ continue;
+
+ if (ret.regType() == Reg::kTypeSt) {
+ const Reg& reg = op.as<Reg>();
+ uint32_t vIndex = Operand::virtIdToIndex(reg.id());
+
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ if (workReg->group() != Reg::kGroupVec)
+ return DebugUtils::errored(kErrorInvalidAssignment);
+
+ Reg src = Reg(workReg->signature(), workReg->virtId());
+ Mem mem;
+
+ uint32_t typeId = Type::baseOf(workReg->typeId());
+ if (ret.hasTypeId())
+ typeId = ret.typeId();
+
+ switch (typeId) {
+ case Type::kIdF32:
+ ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 4, 4));
+ mem.setSize(4);
+ ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovss, Inst::kIdVmovss), mem, src.as<Xmm>()));
+ ASMJIT_PROPAGATE(cc()->fld(mem));
+ break;
+
+ case Type::kIdF64:
+ ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 8, 4));
+ mem.setSize(8);
+ ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovsd, Inst::kIdVmovsd), mem, src.as<Xmm>()));
+ ASMJIT_PROPAGATE(cc()->fld(mem));
+ break;
+
+ default:
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ }
+ }
+ }
+
+ return kErrorOk;
+}
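
There is no direct XMM to ST(0) move on X86, so the return value takes a
round trip through the temporary stack slot allocated above, e.g. for
Type::kIdF32 (illustration only):

    // movss dword [tmp], xmm0   ; spill the SSE value
    // fld   dword [tmp]         ; reload it into st(0) for the x87 return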
+
+Error X86RACFGBuilder::onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept {
+ const FuncDetail& funcDetail = _pass->func()->detail();
+ const Operand* opArray = funcRet->operands();
+ uint32_t opCount = funcRet->opCount();
+
+ for (uint32_t i = 0; i < opCount; i++) {
+ const Operand& op = opArray[i];
+ if (op.isNone()) continue;
+
+ const FuncValue& ret = funcDetail.ret(i);
+ if (ASMJIT_UNLIKELY(!ret.isReg()))
+ return DebugUtils::errored(kErrorInvalidAssignment);
+
+ // Not handled here...
+ if (ret.regType() == Reg::kTypeSt)
+ continue;
+
+ if (op.isReg()) {
+ // Register return value.
+ const Reg& reg = op.as<Reg>();
+ uint32_t vIndex = Operand::virtIdToIndex(reg.id());
+
+ if (vIndex < Operand::kVirtIdCount) {
+ RAWorkReg* workReg;
+ ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(vIndex, &workReg));
+
+ uint32_t group = workReg->group();
+ uint32_t allocable = _pass->_availableRegs[group];
+ ASMJIT_PROPAGATE(ib.add(workReg, RATiedReg::kUse | RATiedReg::kRead, allocable, ret.regId(), 0, BaseReg::kIdBad, 0));
+ }
+ }
+ else {
+ return DebugUtils::errored(kErrorInvalidAssignment);
+ }
+ }
+
+ return kErrorOk;
+}
+
+// ============================================================================
+// [asmjit::x86::X86RAPass - Construction / Destruction]
+// ============================================================================
+
+X86RAPass::X86RAPass() noexcept
+ : RAPass(),
+ _avxEnabled(false) {}
+X86RAPass::~X86RAPass() noexcept {}
+
+// ============================================================================
+// [asmjit::x86::X86RAPass - OnInit / OnDone]
+// ============================================================================
+
+void X86RAPass::onInit() noexcept {
+ uint32_t archId = cc()->archId();
+ uint32_t baseRegCount = archId == ArchInfo::kIdX86 ? 8u : 16u;
+
+ _archRegsInfo = &opData.archRegs;
+ _archTraits[Reg::kGroupGp] |= RAArchTraits::kHasSwap;
+
+ _physRegCount.set(Reg::kGroupGp , baseRegCount);
+ _physRegCount.set(Reg::kGroupVec , baseRegCount);
+ _physRegCount.set(Reg::kGroupMm , 8);
+ _physRegCount.set(Reg::kGroupKReg, 8);
+ _buildPhysIndex();
+
+ _availableRegCount = _physRegCount;
+ _availableRegs[Reg::kGroupGp ] = Support::lsbMask<uint32_t>(_physRegCount.get(Reg::kGroupGp ));
+ _availableRegs[Reg::kGroupVec ] = Support::lsbMask<uint32_t>(_physRegCount.get(Reg::kGroupVec ));
+ _availableRegs[Reg::kGroupMm ] = Support::lsbMask<uint32_t>(_physRegCount.get(Reg::kGroupMm ));
+ _availableRegs[Reg::kGroupKReg] = Support::lsbMask<uint32_t>(_physRegCount.get(Reg::kGroupKReg));
+
+ _scratchRegIndexes[0] = uint8_t(Gp::kIdCx);
+ _scratchRegIndexes[1] = uint8_t(baseRegCount - 1);
+
+ // The architecture-specific setup implicitly makes all registers available,
+ // so mark as unavailable all special registers that cannot be used in general.
+ bool hasFP = _func->frame().hasPreservedFP();
+
+ makeUnavailable(Reg::kGroupGp, Gp::kIdSp); // ESP|RSP used as a stack-pointer (SP).
+ if (hasFP) makeUnavailable(Reg::kGroupGp, Gp::kIdBp); // EBP|RBP used as a frame-pointer (FP).
+
+ _sp = cc()->zsp();
+ _fp = cc()->zbp();
+ _avxEnabled = _func->frame().isAvxEnabled();
+}
+
+void X86RAPass::onDone() noexcept {}
+
+// ============================================================================
+// [asmjit::x86::X86RAPass - BuildCFG]
+// ============================================================================
+
+Error X86RAPass::buildCFG() noexcept {
+ return X86RACFGBuilder(this).run();
+}
+
+// ============================================================================
+// [asmjit::x86::X86RAPass - OnEmit]
+// ============================================================================
+
+Error X86RAPass::onEmitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
+ RAWorkReg* wReg = workRegById(workId);
+ BaseReg dst(wReg->info().signature(), dstPhysId);
+ BaseReg src(wReg->info().signature(), srcPhysId);
+
+ const char* comment = nullptr;
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_loggerFlags & FormatOptions::kFlagAnnotations) {
+ _tmpString.assignFormat("<MOVE> %s", workRegById(workId)->name());
+ comment = _tmpString.data();
+ }
+#endif
+
+ return X86Internal::emitRegMove(cc()->as<Emitter>(), dst, src, wReg->typeId(), _avxEnabled, comment);
+}
+
+Error X86RAPass::onEmitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
+ RAWorkReg* waReg = workRegById(aWorkId);
+ RAWorkReg* wbReg = workRegById(bWorkId);
+
+ bool is64Bit = Support::max(waReg->typeId(), wbReg->typeId()) >= Type::kIdI64;
+ uint32_t sign = is64Bit ? uint32_t(RegTraits<Reg::kTypeGpq>::kSignature)
+ : uint32_t(RegTraits<Reg::kTypeGpd>::kSignature);
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_loggerFlags & FormatOptions::kFlagAnnotations) {
+ _tmpString.assignFormat("<SWAP> %s, %s", waReg->name(), wbReg->name());
+ cc()->setInlineComment(_tmpString.data());
+ }
+#endif
+
+ return cc()->emit(Inst::kIdXchg, Reg(sign, aPhysId), Reg(sign, bPhysId));
+}
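
The emitted swap is always a single xchg, which needs no scratch register;
the operand width is promoted to 64 bits whenever either work register is
64-bit, e.g. (illustration only):

    // xchg rax, rdx   ; at least one of the two registers is 64-bit
    // xchg eax, edx   ; both registers are 32-bit or narrower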
+
+Error X86RAPass::onEmitLoad(uint32_t workId, uint32_t dstPhysId) noexcept {
+ RAWorkReg* wReg = workRegById(workId);
+ BaseReg dstReg(wReg->info().signature(), dstPhysId);
+ BaseMem srcMem(workRegAsMem(wReg));
+
+ const char* comment = nullptr;
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_loggerFlags & FormatOptions::kFlagAnnotations) {
+ _tmpString.assignFormat("<LOAD> %s", workRegById(workId)->name());
+ comment = _tmpString.data();
+ }
+#endif
+
+ return X86Internal::emitRegMove(cc()->as<Emitter>(), dstReg, srcMem, wReg->typeId(), _avxEnabled, comment);
+}
+
+Error X86RAPass::onEmitSave(uint32_t workId, uint32_t srcPhysId) noexcept {
+ RAWorkReg* wReg = workRegById(workId);
+ BaseMem dstMem(workRegAsMem(wReg));
+ BaseReg srcReg(wReg->info().signature(), srcPhysId);
+
+ const char* comment = nullptr;
+
+#ifndef ASMJIT_NO_LOGGING
+ if (_loggerFlags & FormatOptions::kFlagAnnotations) {
+ _tmpString.assignFormat("<SAVE> %s", workRegById(workId)->name());
+ comment = _tmpString.data();
+ }
+#endif
+
+ return X86Internal::emitRegMove(cc()->as<Emitter>(), dstMem, srcReg, wReg->typeId(), _avxEnabled, comment);
+}
+
+Error X86RAPass::onEmitJump(const Label& label) noexcept {
+ return cc()->jmp(label);
+}
+
+Error X86RAPass::onEmitPreCall(FuncCallNode* call) noexcept {
+ if (call->detail().hasVarArgs()) {
+ uint32_t argCount = call->argCount();
+ const FuncDetail& fd = call->detail();
+
+ switch (call->detail().callConv().id()) {
+ case CallConv::kIdX86SysV64: {
+ // AL register contains the number of arguments passed in XMM register(s).
+ uint32_t n = 0;
+ for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+ for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
+ if (!fd.hasArg(argIndex + argHi))
+ continue;
+
+ const FuncValue& arg = fd.arg(argIndex + argHi);
+ if (arg.isReg() && Reg::groupOf(arg.regType()) == Reg::kGroupVec)
+ n++;
+ }
+ }
+
+ if (!n)
+ ASMJIT_PROPAGATE(cc()->xor_(eax, eax));
+ else
+ ASMJIT_PROPAGATE(cc()->mov(eax, n));
+ break;
+ }
+
+ case CallConv::kIdX86Win64: {
+ // Each double-precision argument passed in XMM must also be passed in a GP register.
+ for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+ for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
+ if (!fd.hasArg(argIndex + argHi))
+ continue;
+
+ const FuncValue& arg = fd.arg(argIndex + argHi);
+ if (arg.isReg() && Reg::groupOf(arg.regType()) == Reg::kGroupVec) {
+ Gp dst = gpq(fd.callConv().passedOrder(Reg::kGroupGp)[argIndex]);
+ Xmm src = xmm(arg.regId());
+ ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovq, Inst::kIdVmovq), dst, src));
+ }
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ return kErrorOk;
+}
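
A concrete illustration of the SysV64 rule above (example call assumed, not
part of the patch): a variadic call such as printf("%f\n", 3.14) passes one
double in XMM0, so the pass emits mov eax, 1 before the call, while a call
with no vector arguments gets the shorter xor eax, eax:

    // printf("%f\n", 3.14);   ->  mov eax, 1     ; one XMM argument
    // printf("%s\n", str);    ->  xor eax, eax   ; no XMM arguments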
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // ASMJIT_BUILD_X86 && !ASMJIT_NO_COMPILER
diff --git a/3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h b/3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h
new file mode 100644
index 00000000000..4fa688b41a3
--- /dev/null
+++ b/3rdparty/asmjit/src/asmjit/x86/x86rapass_p.h
@@ -0,0 +1,118 @@
+// AsmJit - Machine code generation for C++
+//
+// * Official AsmJit Home Page: https://asmjit.com
+// * Official Github Repository: https://github.com/asmjit/asmjit
+//
+// Copyright (c) 2008-2020 The AsmJit Authors
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+#ifndef ASMJIT_X86_X86RAPASS_P_H_INCLUDED
+#define ASMJIT_X86_X86RAPASS_P_H_INCLUDED
+
+#include "../core/api-config.h"
+#ifndef ASMJIT_NO_COMPILER
+
+#include "../core/compiler.h"
+#include "../core/rabuilders_p.h"
+#include "../core/rapass_p.h"
+#include "../x86/x86assembler.h"
+#include "../x86/x86compiler.h"
+
+ASMJIT_BEGIN_SUB_NAMESPACE(x86)
+
+//! \cond INTERNAL
+
+//! \defgroup asmjit_x86_ra X86 RA
+//! \ingroup asmjit_x86
+//!
+//! \brief X86/X64 register allocation.
+
+//! \addtogroup asmjit_x86_ra
+//! \{
+
+// ============================================================================
+// [asmjit::X86RAPass]
+// ============================================================================
+
+//! X86 register allocation pass.
+//!
+//! Takes care of generating function prologs and epilogs, and also performs
+//! register allocation.
+class X86RAPass : public RAPass {
+public:
+ ASMJIT_NONCOPYABLE(X86RAPass)
+ typedef RAPass Base;
+
+ bool _avxEnabled;
+
+ // --------------------------------------------------------------------------
+ // [Construction / Destruction]
+ // --------------------------------------------------------------------------
+
+ X86RAPass() noexcept;
+ virtual ~X86RAPass() noexcept;
+
+ // --------------------------------------------------------------------------
+ // [Accessors]
+ // --------------------------------------------------------------------------
+
+ //! Returns the compiler cast to `x86::Compiler`.
+ inline Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
+
+ // --------------------------------------------------------------------------
+ // [Utilities]
+ // --------------------------------------------------------------------------
+
+ inline uint32_t choose(uint32_t sseInstId, uint32_t avxInstId) noexcept {
+ return _avxEnabled ? avxInstId : sseInstId;
+ }
+
+ // --------------------------------------------------------------------------
+ // [OnInit / OnDone]
+ // --------------------------------------------------------------------------
+
+ void onInit() noexcept override;
+ void onDone() noexcept override;
+
+ // --------------------------------------------------------------------------
+ // [CFG]
+ // --------------------------------------------------------------------------
+
+ Error buildCFG() noexcept override;
+
+ // --------------------------------------------------------------------------
+ // [Emit]
+ // --------------------------------------------------------------------------
+
+ Error onEmitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
+ Error onEmitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;
+
+ Error onEmitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
+ Error onEmitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;
+
+ Error onEmitJump(const Label& label) noexcept override;
+ Error onEmitPreCall(FuncCallNode* node) noexcept override;
+};
+
+//! \}
+//! \endcond
+
+ASMJIT_END_SUB_NAMESPACE
+
+#endif // !ASMJIT_NO_COMPILER
+#endif // ASMJIT_X86_X86RAPASS_P_H_INCLUDED