Diffstat (limited to '3rdparty/asmjit/src/asmjit/core/jitallocator.h')
-rw-r--r--  3rdparty/asmjit/src/asmjit/core/jitallocator.h  544
1 file changed, 418 insertions, 126 deletions
diff --git a/3rdparty/asmjit/src/asmjit/core/jitallocator.h b/3rdparty/asmjit/src/asmjit/core/jitallocator.h
index 9cd0a1f7b7e..b694f8cd535 100644
--- a/3rdparty/asmjit/src/asmjit/core/jitallocator.h
+++ b/3rdparty/asmjit/src/asmjit/core/jitallocator.h
@@ -1,25 +1,7 @@
-// AsmJit - Machine code generation for C++
+// This file is part of AsmJit project <https://asmjit.com>
//
-// * Official AsmJit Home Page: https://asmjit.com
-// * Official Github Repository: https://github.com/asmjit/asmjit
-//
-// Copyright (c) 2008-2020 The AsmJit Authors
-//
-// This software is provided 'as-is', without any express or implied
-// warranty. In no event will the authors be held liable for any damages
-// arising from the use of this software.
-//
-// Permission is granted to anyone to use this software for any purpose,
-// including commercial applications, and to alter it and redistribute it
-// freely, subject to the following restrictions:
-//
-// 1. The origin of this software must not be misrepresented; you must not
-// claim that you wrote the original software. If you use this software
-// in a product, an acknowledgment in the product documentation would be
-// appreciated but is not required.
-// 2. Altered source versions must be plainly marked as such, and must not be
-// misrepresented as being the original software.
-// 3. This notice may not be removed or altered from any source distribution.
+// See asmjit.h or LICENSE.md for license and copyright information
+// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
@@ -28,6 +10,7 @@
#ifndef ASMJIT_NO_JIT
#include "../core/globals.h"
+#include "../core/support.h"
#include "../core/virtmem.h"
ASMJIT_BEGIN_NAMESPACE
@@ -35,37 +18,106 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
-// ============================================================================
-// [asmjit::JitAllocator]
-// ============================================================================
+//! Options used by \ref JitAllocator.
+enum class JitAllocatorOptions : uint32_t {
+ //! No options.
+ kNone = 0,
+
+ //! Enables the use of anonymous memory-mapped memory that is mapped into two buffers, each having a different pointer.
+ //! The first buffer has read and execute permissions and the second buffer has read+write permissions.
+ //!
+ //! See \ref VirtMem::allocDualMapping() for more details about this feature.
+ //!
+ //! \remarks Dual mapping would be automatically turned on by \ref JitAllocator in case of hardened runtime that
+ //! enforces `W^X` policy, so specifying this flag essentially forces the use of dual mapped pages even when RWX
+ //! pages can be allocated and dual mapping is not necessary.
+ kUseDualMapping = 0x00000001u,
+
+ //! Enables the use of multiple pools with increasing granularity instead of a single pool. This flag would enable
+ //! 3 internal pools in total having 64, 128, and 256 bytes granularity.
+ //!
+ //! This feature is only recommended for users that generate a lot of code and would like to minimize the overhead
+ //! of `JitAllocator` itself by having blocks of different allocation granularities. Using this feature only for
+ //! few allocations won't pay off as the allocator may need to create more blocks initially before it can take the
+ //! advantage of variable block granularity.
+ kUseMultiplePools = 0x00000002u,
+
+ //! Always fill reserved memory by a fill-pattern.
+ //!
+ //! Causes a new block to be cleared by the fill pattern and freshly released memory to be cleared before making
+ //! it ready for another use.
+ kFillUnusedMemory = 0x00000004u,
+
+ //! When this flag is set the allocator would immediately release unused blocks during `release()` or `reset()`.
+ //! When this flag is not set the allocator would keep one empty block in each pool to prevent excessive virtual
+ //! memory allocations and deallocations in border cases, which involve constantly allocating and deallocating a
+ //! single block caused by repetitively calling `alloc()` and `release()` when the allocator has either no blocks
+ //! or has all blocks fully occupied.
+ kImmediateRelease = 0x00000008u,
+
+ //! This flag enables placing functions (or allocating memory) at the very beginning of each memory mapped region.
+ //!
+ //! Initially, this was the default behavior. However, LLVM developers working on undefined behavior sanitizer
+ //! (UBSAN) decided that they want to store metadata before each function and to access such metadata before an
+ //! indirect function call. This means that the instrumented code always reads from `[fnPtr - 8]` to decode whether
+ //! the function has its metadata present. However, reading 8 bytes below a function means that if a function is
+ //! placed at the very beginning of a memory mapped region, it could try to read bytes that are inaccessible. And
+ //! since AsmJit can be compiled as a shared library and used by applications instrumented by UBSAN, it's not
+ //! possible to conditionally compile the support only when necessary.
+ //!
+ //! \remarks This flag controls a workaround to make it possible to use LLVM UBSAN with AsmJit's \ref JitAllocator.
+ //! There is no undefined behavior even when `kDisableInitialPadding` is used, however, that doesn't really matter
+ //! as LLVM's UBSAN introduces one, and according to LLVM developers it's a "trade-off". This flag is safe to use
+ //! when the code is not instrumented with LLVM's UBSAN.
+ kDisableInitialPadding = 0x00000010u,
+
+ //! Enables the use of large pages, if they are supported and the process can actually allocate them.
+ //!
+ //! \remarks This flag is a hint - if large pages can be allocated, JitAllocator would try to allocate them.
+ //! However, if the allocation fails, it will still fall back to using regular pages as \ref JitAllocator
+ //! is designed to minimize allocation failures, so a regular page is better than no page at all. Also, if a
+ //! block \ref JitAllocator wants to allocate is too small to consume a whole large page, regular page(s) will
+ //! be allocated as well.
+ kUseLargePages = 0x00000020u,
+
+ //! Forces \ref JitAllocator to always align block size to be at least as big as a large page, if large pages are
+ //! enabled. This option does nothing if large pages are disabled.
+ //!
+ //! \remarks If \ref kUseLargePages option is used, the allocator would prefer large pages only when allocating a
+ //! block that has a sufficient size. Usually the allocator first allocates a smaller block, and when more requests
+ //! come it will start increasing the block size of subsequent allocations. This option makes sure that even the first
+ //! allocated block is at least as large as a large page when large pages are enabled and can be allocated.
+ kAlignBlockSizeToLargePage = 0x00000040u,
+
+ //! Use a custom fill pattern, must be combined with `kFillUnusedMemory`.
+ kCustomFillPattern = 0x10000000u
+};
+ASMJIT_DEFINE_ENUM_FLAGS(JitAllocatorOptions)
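For orientation, a minimal sketch of how these options are combined and tested once `ASMJIT_DEFINE_ENUM_FLAGS(JitAllocatorOptions)` provides the bitwise operators (illustrative only; `JitAllocator` and its `CreateParams` appear later in this header):

```
// Illustrative sketch - combining JitAllocatorOptions flags.
JitAllocatorOptions opts = JitAllocatorOptions::kUseMultiplePools
                         | JitAllocatorOptions::kFillUnusedMemory;

// Testing a flag mirrors what JitAllocator::hasOption() does below.
bool fillsUnused = uint32_t(opts & JitAllocatorOptions::kFillUnusedMemory) != 0;
```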
//! A simple implementation of memory manager that uses `asmjit::VirtMem`
//! functions to manage virtual memory for JIT compiled code.
//!
//! Implementation notes:
//!
-//! - Granularity of allocated blocks is different than granularity for a typical
-//! C malloc. In addition, the allocator can use several memory pools having a
-//! different granularity to minimize the maintenance overhead. Multiple pools
+//! - Granularity of allocated blocks is different than granularity for a typical C malloc. In addition, the allocator
+//! can use several memory pools having a different granularity to minimize the maintenance overhead. Multiple pools
//! feature requires `kFlagUseMultiplePools` flag to be set.
//!
-//! - The allocator doesn't store any information in executable memory, instead,
-//! the implementation uses two bit-vectors to manage allocated memory of each
-//! allocator-block. The first bit-vector called 'used' is used to track used
-//! memory (where each bit represents memory size defined by granularity) and
-//! the second bit vector called 'stop' is used as a sentinel to mark where
-//! the allocated area ends.
+//! - The allocator doesn't store any information in executable memory, instead, the implementation uses two
+//! bit-vectors to manage allocated memory of each allocator-block. The first bit-vector called 'used' is used to
+//! track used memory (where each bit represents memory size defined by granularity) and the second bit vector called
+//! 'stop' is used as a sentinel to mark where the allocated area ends.
//!
-//! - Internally, the allocator also uses RB tree to keep track of all blocks
-//! across all pools. Each inserted block is added to the tree so it can be
-//! matched fast during `release()` and `shrink()`.
+//! - Internally, the allocator also uses RB tree to keep track of all blocks across all pools. Each inserted block is
+//! added to the tree so it can be matched fast during `release()` and `shrink()`.
class JitAllocator {
public:
ASMJIT_NONCOPYABLE(JitAllocator)
+ //! Visible \ref JitAllocator implementation data.
struct Impl {
- //! Allocator options, see \ref JitAllocator::Options.
- uint32_t options;
+ //! Allocator options.
+ JitAllocatorOptions options;
//! Base block size (0 if the allocator is not initialized).
uint32_t blockSize;
//! Base granularity (0 if the allocator is not initialized).
@@ -74,47 +126,13 @@ public:
uint32_t fillPattern;
};
+ //! \name Members
+ //! \{
+
//! Allocator implementation (private).
Impl* _impl;
- enum Options : uint32_t {
- //! Enables the use of an anonymous memory-mapped memory that is mapped into
- //! two buffers having a different pointer. The first buffer has read and
- //! execute permissions and the second buffer has read+write permissions.
- //!
- //! See \ref VirtMem::allocDualMapping() for more details about this feature.
- kOptionUseDualMapping = 0x00000001u,
-
- //! Enables the use of multiple pools with increasing granularity instead of
- //! a single pool. This flag would enable 3 internal pools in total having
- //! 64, 128, and 256 bytes granularity.
- //!
- //! This feature is only recommended for users that generate a lot of code
- //! and would like to minimize the overhead of `JitAllocator` itself by
- //! having blocks of different allocation granularities. Using this feature
- //! only for few allocations won't pay off as the allocator may need to
- //! create more blocks initially before it can take the advantage of
- //! variable block granularity.
- kOptionUseMultiplePools = 0x00000002u,
-
- //! Always fill reserved memory by a fill-pattern.
- //!
- //! Causes a new block to be cleared by the fill pattern and freshly
- //! released memory to be cleared before making it ready for another use.
- kOptionFillUnusedMemory = 0x00000004u,
-
- //! When this flag is set the allocator would immediately release unused
- //! blocks during `release()` or `reset()`. When this flag is not set the
- //! allocator would keep one empty block in each pool to prevent excessive
- //! virtual memory allocations and deallocations in border cases, which
- //! involve constantly allocating and deallocating a single block caused
- //! by repetitive calling `alloc()` and `release()` when the allocator has
- //! either no blocks or have all blocks fully occupied.
- kOptionImmediateRelease = 0x00000008u,
-
- //! Use a custom fill pattern, must be combined with `kFlagFillUnusedMemory`.
- kOptionCustomFillPattern = 0x10000000u
- };
+ //! \}
//! \name Construction & Destruction
//! \{
@@ -132,50 +150,45 @@ public:
//! JitAllocator allocator(&params);
//! ```
struct CreateParams {
- //! Allocator options, see \ref JitAllocator::Options.
+ //! Allocator options.
//!
//! No options are used by default.
- uint32_t options;
+ JitAllocatorOptions options = JitAllocatorOptions::kNone;
//! Base size of a single block in bytes (default 64kB).
//!
- //! \remarks Block size must be equal or greater to page size and must be
- //! power of 2. If the input is not valid then the default block size will
- //! be used instead.
- uint32_t blockSize;
+ //! \remarks Block size must be equal to or greater than page size and must be power of 2. If the input is not
+ //! valid then the default block size will be used instead.
+ uint32_t blockSize = 0;
- //! Base granularity (and also natural alignment) of allocations in bytes
- //! (default 64).
+ //! Base granularity (and also natural alignment) of allocations in bytes (default 64).
//!
- //! Since the `JitAllocator` uses bit-arrays to mark used memory the
- //! granularity also specifies how many bytes correspond to a single bit in
- //! such bit-array. Higher granularity means more waste of virtual memory
- //! (as it increases the natural alignment), but smaller bit-arrays as less
- //! bits would be required per a single block.
- uint32_t granularity;
+ //! Since the `JitAllocator` uses bit-arrays to mark used memory the granularity also specifies how many bytes
+ //! correspond to a single bit in such bit-array. Higher granularity means more waste of virtual memory (as it
+ //! increases the natural alignment), but smaller bit-arrays as less bits would be required per a single block.
+ uint32_t granularity = 0;
//! Pattern to use to fill unused memory.
//!
- //! Only used if \ref kOptionCustomFillPattern is set.
- uint32_t fillPattern;
+ //! Only used if \ref JitAllocatorOptions::kCustomFillPattern is set.
+ uint32_t fillPattern = 0;
// Reset the content of `CreateParams`.
- inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
+ ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = CreateParams{}; }
};
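A slightly fuller variant of the `CreateParams` example referenced in the comment above (all values are illustrative):

```
// Illustrative sketch - creating a JitAllocator with custom parameters.
JitAllocator::CreateParams params {};            // defaults: no options, default sizes
params.options = JitAllocatorOptions::kUseMultiplePools
               | JitAllocatorOptions::kFillUnusedMemory
               | JitAllocatorOptions::kCustomFillPattern;
params.blockSize = 1024 * 1024;                  // power of 2, >= page size
params.fillPattern = 0xCCCCCCCCu;                // e.g. INT3 bytes on x86 (arbitrary choice)

JitAllocator allocator(&params);
```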
//! Creates a `JitAllocator` instance.
- explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
+ ASMJIT_API explicit JitAllocator(const CreateParams* params = nullptr) noexcept;
//! Destroys the `JitAllocator` instance and release all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
- inline bool isInitialized() const noexcept { return _impl->blockSize == 0; }
+ ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _impl->blockSize == 0; }
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
//!
- //! \remarks This function is not thread-safe as it's designed to be used when
- //! nobody else is using allocator. The reason is that there is no point of
- //! calling `reset()` when the allocator is still in use.
- ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
+ //! \remarks This function is not thread-safe as it's designed to be used when nobody else is using allocator.
+ //! The reason is that there is no point of calling `reset()` when the allocator is still in use.
+ ASMJIT_API void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept;
//! \}
@@ -183,36 +196,315 @@ public:
//! \{
//! Returns allocator options, see `Flags`.
- inline uint32_t options() const noexcept { return _impl->options; }
+ ASMJIT_INLINE_NODEBUG JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
- inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; }
+ ASMJIT_INLINE_NODEBUG bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate).
- inline uint32_t blockSize() const noexcept { return _impl->blockSize; }
+ ASMJIT_INLINE_NODEBUG uint32_t blockSize() const noexcept { return _impl->blockSize; }
//! Returns granularity of the allocator.
- inline uint32_t granularity() const noexcept { return _impl->granularity; }
+ ASMJIT_INLINE_NODEBUG uint32_t granularity() const noexcept { return _impl->granularity; }
//! Returns pattern that is used to fill unused memory if `kFlagUseFillPattern` is set.
- inline uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
+ ASMJIT_INLINE_NODEBUG uint32_t fillPattern() const noexcept { return _impl->fillPattern; }
//! \}
//! \name Alloc & Release
//! \{
- //! Allocate `size` bytes of virtual memory.
+ //! A memory reference returned by \ref JitAllocator::alloc().
//!
- //! \remarks This function is thread-safe.
- ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept;
+ //! Span contains everything needed to actually write new code to the memory chunk it references.
+ class Span {
+ public:
+ //! \name Constants
+ //! \{
+
+ //! Span flags
+ enum class Flags : uint32_t {
+ //! No flags.
+ kNone = 0u,
+
+ //! The process has never executed the region of the span.
+ //!
+ //! If this flag is set on a \ref Span it would mean that the allocator can avoid flushing
+ //! instruction cache after a code has been written to it.
+ kInstructionCacheClean = 0x00000001u
+ };
+
+ //! \}
+
+ //! \name Members
+ //! \{
+
+ //! Address of memory that has Read and Execute permissions.
+ void* _rx = nullptr;
+
+ //! Address of memory that has Read and Write permissions.
+ void* _rw = nullptr;
+
+ //! Size of the span in bytes (rounded up to the allocation granularity).
+ size_t _size = 0;
+
+ //! Pointer that references a memory block maintained by \ref JitAllocator.
+ //!
+ //! This pointer is considered private and should never be used nor inspected outside of AsmJit.
+ void* _block = nullptr;
+
+ //! Span flags.
+ Flags _flags = Flags::kNone;
+
+ //! Reserved for future use.
+ uint32_t _reserved = 0;
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ //! Returns a pointer having Read & Execute permissions (references executable memory).
+ //!
+ //! This pointer is never NULL if the allocation succeeded; it points to executable memory.
+ ASMJIT_INLINE_NODEBUG void* rx() const noexcept { return _rx; }
+
+ //! Returns a pointer having Read & Write permissions (references writable memory).
+ //!
+ //! Depending on the type of the allocation strategy this could either be:
+ //!
+ //! - the same address as returned by `rx()` if the allocator uses RWX mapping (pages have all of Read, Write,
+ //! and Execute permissions) or MAP_JIT, which requires either \ref VirtMem::ProtectJitReadWriteScope or to
+ //! call \ref VirtMem::protectJitMemory() manually.
+ //! - a valid pointer, but not the same as `rx` - this would be valid if dual mapping is used.
+ //! - NULL pointer, in case that the allocation strategy doesn't use RWX, MAP_JIT, or dual mapping. In this
+ //! case only \ref JitAllocator can copy new code into the executable memory referenced by \ref Span.
+ //!
+ //! \note If `rw()` returns a non-null pointer it's important to use either VirtMem::protectJitMemory() or
+ //! \ref VirtMem::ProtectJitReadWriteScope to guard the write, because in case of `MAP_JIT` it would temporarily
+ //! switch the permissions of the pointer to RW (that's per thread permissions).
+ //!
+ //! If \ref VirtMem::ProtectJitReadWriteScope is not used it's important to clear the instruction cache via
+ //! \ref VirtMem::flushInstructionCache() after the write is done.
+ ASMJIT_INLINE_NODEBUG void* rw() const noexcept { return _rw; }
+
+ //! Returns size of this span, aligned to the allocator granularity.
+ ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
+
+ //! Returns span flags.
+ ASMJIT_INLINE_NODEBUG Flags flags() const noexcept { return _flags; }
+
+ //! Shrinks this span to `newSize`.
+ //!
+ //! \note This is the only function that is able to change the size of a span, and its only use case is to
+ //! shrink the span size during \ref JitAllocator::write(). When the writer detects that the span size shrunk,
+ //! it will automatically shrink the memory used by the span, and propagate the new aligned size to the caller.
+ ASMJIT_INLINE_NODEBUG void shrink(size_t newSize) noexcept { _size = Support::min(_size, newSize); }
+
+ //! Returns whether \ref rw() returns a non-null pointer.
+ ASMJIT_INLINE_NODEBUG bool isDirectlyWritable() const noexcept { return _rw != nullptr; }
+
+ //! \}
+ };
- //! Release a memory returned by `alloc()`.
+ //! Allocates a new memory span of the requested `size`.
+ ASMJIT_API Error alloc(Span& out, size_t size) noexcept;
+
+ //! Releases a memory block returned by `alloc()`.
//!
//! \remarks This function is thread-safe.
- ASMJIT_API Error release(void* ro) noexcept;
+ ASMJIT_API Error release(void* rx) noexcept;
- //! Free extra memory allocated with `p` by restricting it to `newSize` size.
+ //! Frees extra memory allocated with `rx` by shrinking it to the given `newSize`.
//!
//! \remarks This function is thread-safe.
- ASMJIT_API Error shrink(void* ro, size_t newSize) noexcept;
+ ASMJIT_API Error shrink(Span& span, size_t newSize) noexcept;
+
+ //! Queries information about an allocated memory block that contains the given `rx`, and writes it to `out`.
+ //!
+ //! If the pointer is matched, the function returns `kErrorOk` and fills `out` with the corresponding span.
+ ASMJIT_API Error query(Span& out, void* rx) const noexcept;
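Putting `Span`, `alloc()`, and `release()` together, a minimal sketch of the intended flow (the `emitExample` helper and the `code`/`codeSize` inputs are hypothetical; assumes `<cstring>` for `memcpy` and that the `write()` overload declared further below is available):

```
// Illustrative sketch - allocate a span, copy code into it, then release it.
// Hypothetical helper, not part of AsmJit.
Error emitExample(JitAllocator& allocator, const void* code, size_t codeSize) noexcept {
  JitAllocator::Span span;
  ASMJIT_PROPAGATE(allocator.alloc(span, codeSize));

  if (span.isDirectlyWritable()) {
    // rw() is valid (RWX, MAP_JIT, or dual mapping) - guard the write and let
    // the scope flush the instruction cache when it ends.
    VirtMem::ProtectJitReadWriteScope scope(span.rx(), span.size());
    memcpy(span.rw(), code, codeSize);
  }
  else {
    // rw() is null - only JitAllocator itself can copy into the executable memory.
    ASMJIT_PROPAGATE(allocator.write(span, 0, code, codeSize));
  }

  // ... execute the code via span.rx() ...
  return allocator.release(span.rx());
}
```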
+
+#if !defined(ASMJIT_NO_DEPRECATED)
+ //! Allocates a new memory block of the requested `size`.
+ ASMJIT_DEPRECATED("Use alloc(Span& out, size_t size) instead")
+ ASMJIT_FORCE_INLINE Error alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcept {
+ Span span;
+ Error err = alloc(span, size);
+ *rwPtrOut = span.rw();
+ *rxPtrOut = span.rx();
+ return err;
+ }
+
+ ASMJIT_DEPRECATED("Use shrink(Span& span, size_t newSize) instead")
+ ASMJIT_FORCE_INLINE Error shrink(void* rxPtr, size_t newSize) noexcept {
+ Span span;
+ ASMJIT_PROPAGATE(query(span, rxPtr));
+ return (span.size() > newSize) ? shrink(span, newSize) : Error(kErrorOk);
+ }
+
+ ASMJIT_DEPRECATED("Use query(Span& out, void* rx) instead")
+ ASMJIT_FORCE_INLINE Error query(void* rxPtr, void** rxPtrOut, void** rwPtrOut, size_t* sizeOut) const noexcept {
+ Span span;
+ Error err = query(span, rxPtr);
+ *rxPtrOut = span.rx();
+ *rwPtrOut = span.rw();
+ *sizeOut = span.size();
+ return err;
+ }
+#endif
+
+ //! \}
+
+ //! \name Write Operations
+ //! \{
+
+ typedef Error (ASMJIT_CDECL* WriteFunc)(Span& span, void* userData) ASMJIT_NOEXCEPT_TYPE;
+
+ ASMJIT_API Error write(
+ Span& span,
+ size_t offset,
+ const void* src,
+ size_t size,
+ VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
+
+ ASMJIT_API Error write(
+ Span& span,
+ WriteFunc writeFunc,
+ void* userData,
+ VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
+
+ template<class Lambda>
+ ASMJIT_FORCE_INLINE Error write(
+ Span& span,
+ Lambda&& lambdaFunc,
+ VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
+
+ WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
+ Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
+ return lambdaFunc(span);
+ };
+ return write(span, wrapperFunc, (void*)(&lambdaFunc), policy);
+ }
+
+ //! \}
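The same copy can also be expressed with the buffer and lambda overloads declared above (a sketch; `allocator`, `span`, `code`, and `codeSize` are illustrative and error handling is elided):

```
// Illustrative sketch - the two write() overloads.
// 1) Copy an existing buffer into the span:
Error err = allocator.write(span, 0, code, codeSize);

// 2) Generate the code inside a callback; the allocator takes care of making
//    the span writable and of flushing the instruction cache afterwards:
err = allocator.write(span, [&](JitAllocator::Span& span) noexcept -> Error {
  memcpy(span.rw(), code, codeSize);
  span.shrink(codeSize);  // optional - the only supported use of Span::shrink()
  return kErrorOk;
});
```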
+
+ //! \name Write Operations with Scope
+ //! \{
+
+ //! \cond INTERNAL
+
+ //! Write scope data.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope instead.
+ struct WriteScopeData {
+ //! \name Members
+ //! \{
+
+ //! Link to the allocator.
+ JitAllocator* _allocator;
+ //! Cache policy passed to \ref JitAllocator::beginWriteScope().
+ VirtMem::CachePolicy _policy;
+ //! Internal flags used by the implementation.
+ uint32_t _flags;
+ //! Internal data used by the implementation.
+ size_t _data[64];
+
+ //! \}
+ };
+
+ //! Begins a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope constructor instead.
+ ASMJIT_API Error beginWriteScope(WriteScopeData& scope, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept;
+
+ //! Ends a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope destructor instead.
+ ASMJIT_API Error endWriteScope(WriteScopeData& scope) noexcept;
+
+ //! Flushes accumulated changes in a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope destructor or \ref WriteScope::flush() instead.
+ ASMJIT_API Error flushWriteScope(WriteScopeData& scope) noexcept;
+
+ //! Alternative to `JitAllocator::write(span, offset, src, size)`, but under a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
+ ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, size_t offset, const void* src, size_t size) noexcept;
+
+ //! Alternative to `JitAllocator::write(span, writeFunc, userData)`, but under a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
+ ASMJIT_API Error scopedWrite(WriteScopeData& scope, Span& span, WriteFunc writeFunc, void* userData) noexcept;
+
+ //! Alternative to `JitAllocator::write(span, [lambda])`, but under a write `scope`.
+ //!
+ //! This is mostly for internal purposes, please use \ref WriteScope::write() instead.
+ template<class Lambda>
+ inline Error scopedWrite(WriteScopeData& scope, Span& span, Lambda&& lambdaFunc) noexcept {
+ WriteFunc wrapperFunc = [](Span& span, void* userData) noexcept -> Error {
+ Lambda& lambdaFunc = *static_cast<Lambda*>(userData);
+ return lambdaFunc(span);
+ };
+ return scopedWrite(scope, span, wrapperFunc, (void*)(&lambdaFunc));
+ }
+
+ //! \endcond
+
+ //! Write scope can be used to create a single scope that is optimized for writing multiple spans.
+ class WriteScope : public WriteScopeData {
+ public:
+ ASMJIT_NONCOPYABLE(WriteScope)
+
+ //! \name Construction & Destruction
+ //! \{
+
+ // Begins a write scope.
+ inline explicit WriteScope(JitAllocator* allocator, VirtMem::CachePolicy policy = VirtMem::CachePolicy::kDefault) noexcept {
+ allocator->beginWriteScope(*this, policy);
+ }
+
+ // Ends a write scope.
+ inline ~WriteScope() noexcept {
+ if (_allocator)
+ _allocator->endWriteScope(*this);
+ }
+
+ //! \}
+
+ //! \name Accessors
+ //! \{
+
+ ASMJIT_INLINE_NODEBUG JitAllocator* allocator() const noexcept { return _allocator; }
+ ASMJIT_INLINE_NODEBUG VirtMem::CachePolicy policy() const noexcept { return _policy; }
+
+ //! \}
+
+ //! \name Operations
+ //! \{
+
+ //! Similar to `JitAllocator::write(span, offset, src, size)`, but under a write scope.
+ ASMJIT_INLINE_NODEBUG Error write(Span& span, size_t offset, const void* src, size_t size) noexcept {
+ return _allocator->scopedWrite(*this, span, offset, src, size);
+ }
+
+ //! Similar to `JitAllocator::write(span, writeFunc, userData)`, but under a write scope.
+ ASMJIT_INLINE_NODEBUG Error write(Span& span, WriteFunc writeFunc, void* userData) noexcept {
+ return _allocator->scopedWrite(*this, span, writeFunc, userData);
+ }
+
+ //! Similar to `JitAllocator::write(span, <lambda>)`, but under a write scope.
+ template<class Lambda>
+ ASMJIT_INLINE_NODEBUG Error write(Span& span, Lambda&& lambdaFunc) noexcept {
+ return _allocator->scopedWrite(*this, span, lambdaFunc);
+ }
+
+ //! Flushes accumulated changes in this write scope.
+ ASMJIT_INLINE_NODEBUG Error flush() noexcept {
+ return _allocator->flushWriteScope(*this);
+ }
+
+ //! \}
+ };
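A short sketch of batching several writes under one scope (the spans, buffers, and sizes are illustrative placeholders):

```
// Illustrative sketch - multiple writes under a single WriteScope.
{
  JitAllocator::WriteScope scope(&allocator);
  scope.write(spanA, 0, codeA, sizeA);
  scope.write(spanB, 0, codeB, sizeB);
  scope.write(spanC, [&](JitAllocator::Span& span) noexcept -> Error {
    memcpy(span.rw(), codeC, sizeC);
    return kErrorOk;
  });
}  // ~WriteScope() calls endWriteScope(), flushing any accumulated changes
```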
//! \}
@@ -223,6 +515,8 @@ public:
struct Statistics {
//! Number of blocks `JitAllocator` maintains.
size_t _blockCount;
+ //! Number of active allocations.
+ size_t _allocationCount;
//! How many bytes are currently used / allocated.
size_t _usedSize;
//! How many bytes are currently reserved by the allocator.
@@ -230,34 +524,32 @@ public:
//! Allocation overhead (in bytes) required to maintain all blocks.
size_t _overheadSize;
- inline void reset() noexcept {
- _blockCount = 0;
- _usedSize = 0;
- _reservedSize = 0;
- _overheadSize = 0;
- }
+ //! Resets the statistics to all zeros.
+ ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = Statistics{}; }
//! Returns count of blocks managed by `JitAllocator` at the moment.
- inline size_t blockCount() const noexcept { return _blockCount; }
+ ASMJIT_INLINE_NODEBUG size_t blockCount() const noexcept { return _blockCount; }
+ //! Returns the number of active allocations.
+ ASMJIT_INLINE_NODEBUG size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used.
- inline size_t usedSize() const noexcept { return _usedSize; }
+ ASMJIT_INLINE_NODEBUG size_t usedSize() const noexcept { return _usedSize; }
//! Returns the number of bytes unused by the allocator at the moment.
- inline size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
- //! Returns the total number of bytes bytes reserved by the allocator (sum of sizes of all blocks).
- inline size_t reservedSize() const noexcept { return _reservedSize; }
+ ASMJIT_INLINE_NODEBUG size_t unusedSize() const noexcept { return _reservedSize - _usedSize; }
+ //! Returns the total number of bytes reserved by the allocator (sum of sizes of all blocks).
+ ASMJIT_INLINE_NODEBUG size_t reservedSize() const noexcept { return _reservedSize; }
//! Returns the number of bytes the allocator needs to manage the allocated memory.
- inline size_t overheadSize() const noexcept { return _overheadSize; }
+ ASMJIT_INLINE_NODEBUG size_t overheadSize() const noexcept { return _overheadSize; }
- inline double usedSizeAsPercent() const noexcept {
+ ASMJIT_INLINE_NODEBUG double usedSizeAsPercent() const noexcept {
return (double(usedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
- inline double unusedSizeAsPercent() const noexcept {
+ ASMJIT_INLINE_NODEBUG double unusedSizeAsPercent() const noexcept {
return (double(unusedSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
- inline double overheadSizeAsPercent() const noexcept {
+ ASMJIT_INLINE_NODEBUG double overheadSizeAsPercent() const noexcept {
return (double(overheadSize()) / (double(reservedSize()) + 1e-16)) * 100.0;
}
};
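A small sketch of reading these statistics (assumes the `JitAllocator::statistics()` accessor declared elsewhere in this header, and `<cstdio>` for `printf`):

```
// Illustrative sketch - inspecting allocator statistics.
JitAllocator::Statistics stats = allocator.statistics();
printf("blocks=%zu allocations=%zu used=%zu/%zu bytes (%.1f%% used)\n",
       stats.blockCount(),
       stats.allocationCount(),
       stats.usedSize(),
       stats.reservedSize(),
       stats.usedSizeAsPercent());
```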