Add two new allocators; rpmalloc is currently used on Windows (mimalloc is wired up but unused for now)
parent 5398190030, commit 8b1a2d6f0c
@@ -136,6 +136,8 @@ endif()

option(STAR_LUA_APICHECK "Use lua api checks" OFF)
option(STAR_USE_JEMALLOC "Use jemalloc allocators" OFF)
option(STAR_USE_MIMALLOC "Use mimalloc allocators" OFF)
option(STAR_USE_RPMALLOC "Use rpmalloc allocators" OFF)

# Report all the discovered system / environment settings and all options.

@@ -174,6 +176,7 @@ endif()

message(STATUS "Using Lua API checks: ${STAR_LUA_APICHECK}")
message(STATUS "Using jemalloc: ${STAR_USE_JEMALLOC}")
message(STATUS "Using mimalloc: ${STAR_USE_MIMALLOC}")

# Set C defines and cmake variables based on the build settings we have now
# determined...
@@ -244,6 +247,10 @@ endif()

if(STAR_USE_JEMALLOC)
  add_definitions(-DSTAR_USE_JEMALLOC)
elseif(STAR_USE_MIMALLOC)
  add_definitions(-DSTAR_USE_MIMALLOC)
elseif(STAR_USE_RPMALLOC)
  add_definitions(-DSTAR_USE_RPMALLOC -DENABLE_PRELOAD)
endif()

# Set C/C++ compiler flags based on build environment...
@@ -449,6 +456,11 @@ if(STAR_USE_JEMALLOC)
  set(STAR_EXT_LIBS ${JEMALLOC_LIBRARY})
endif()

if (STAR_USE_MIMALLOC)
  find_package(mimalloc CONFIG REQUIRED)
  set(STAR_EXT_LIBS ${STAR_EXT_LIBS} $<IF:$<TARGET_EXISTS:mimalloc-static>,mimalloc-static,mimalloc>)
endif()

find_package(ZLIB REQUIRED)
find_package(PNG REQUIRED)
find_package(Freetype REQUIRED)
@@ -31,7 +31,8 @@
       "VCPKG_TARGET_TRIPLET": "x64-windows-mixed-md",
       "CMAKE_MSVC_RUNTIME_LIBRARY": "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL",
       "CMAKE_INCLUDE_PATH": "${sourceParentDir}/lib/windows/include",
-      "CMAKE_LIBRARY_PATH": "${sourceParentDir}/lib/windows"
+      "CMAKE_LIBRARY_PATH": "${sourceParentDir}/lib/windows",
+      "STAR_USE_RPMALLOC": true
     },
     "vendor": {
       "microsoft.com/VisualStudioSettings/CMake/1.0": {
@@ -2,6 +2,18 @@

#ifdef STAR_USE_JEMALLOC
#include "jemalloc/jemalloc.h"
#elif STAR_USE_MIMALLOC
#include "mimalloc.h"
#elif STAR_USE_RPMALLOC
#include "rpnew.h"

bool rpm_linker_ref() {
  rpmalloc_linker_reference();
  return true;
}

static bool _rpm_linker_ref = rpm_linker_ref();

#endif

namespace Star {
@@ -42,6 +54,38 @@ namespace Star {
    ::sdallocx(ptr, size, 0);
  }
#endif
#elif STAR_USE_MIMALLOC
  void* malloc(size_t size) {
    return mi_malloc(size);
  }

  void* realloc(void* ptr, size_t size) {
    return mi_realloc(ptr, size);
  }

  void free(void* ptr) {
    return mi_free(ptr);
  }

  void free(void* ptr, size_t size) {
    return mi_free_size(ptr, size);
  }
#elif STAR_USE_RPMALLOC
  void* malloc(size_t size) {
    return rpmalloc(size);
  }

  void* realloc(void* ptr, size_t size) {
    return rprealloc(ptr, size);
  }

  void free(void* ptr) {
    return rpfree(ptr);
  }

  void free(void* ptr, size_t) {
    return rpfree(ptr);
  }
#else
  void* malloc(size_t size) {
    return ::malloc(size);
@@ -62,6 +106,9 @@ namespace Star {

}

#ifndef STAR_USE_RPMALLOC


void* operator new(std::size_t size) {
  auto ptr = Star::malloc(size);
  if (!ptr)
@@ -110,3 +157,5 @@ void operator delete(void* ptr, std::size_t size) noexcept {
void operator delete[](void* ptr, std::size_t size) noexcept {
  Star::free(ptr, size);
}

#endif
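Usage sketch (not part of the commit; it assumes the engine header that declares these wrappers is in scope): the Star::malloc / Star::free pair above routes to whichever backend was chosen at configure time, and the sized free overload is the cheaper path when the size is known.

    // Illustrative only: exercises the wrappers added in the hunk above.
    #include <cstddef>
    #include <cstring>

    void copy_example(const char* src, std::size_t len) {
      void* buf = Star::malloc(len);   // mi_malloc / rpmalloc / ::malloc, per build options
      if (buf) {
        std::memcpy(buf, src, len);
        Star::free(buf, len);          // sized path: mi_free_size / rpfree / ::free
      }
    }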
source/extern/CMakeLists.txt (vendored, 11 lines changed)
@@ -70,4 +70,15 @@ SET (star_extern_SOURCES
    lua/lzio.c
    )


IF (STAR_USE_RPMALLOC)
  SET(star_extern_HEADERS ${star_extern_HEADERS}
      rpmalloc.h
      rpnew.h
      )
  SET(star_extern_SOURCES ${star_extern_SOURCES}
      rpmalloc.c
      )
ENDIF()

ADD_LIBRARY (star_extern OBJECT ${star_extern_SOURCES} ${star_extern_HEADERS})
source/extern/malloc.c (vendored, new file, 504 lines)
@ -0,0 +1,504 @@
|
||||
/* malloc.c - Memory allocator - Public Domain - 2016 Mattias Jansson
|
||||
*
|
||||
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
|
||||
* The latest source code is always available at
|
||||
*
|
||||
* https://github.com/mjansson/rpmalloc
|
||||
*
|
||||
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
|
||||
*
|
||||
*/
|
||||
|
||||
//
|
||||
// This file provides overrides for the standard library malloc entry points for C and new/delete operators for C++
|
||||
// It also provides automatic initialization/finalization of process and threads
|
||||
//
|
||||
#if defined(__TINYC__)
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
|
||||
#ifndef ARCH_64BIT
|
||||
# if defined(__LLP64__) || defined(__LP64__) || defined(_WIN64)
|
||||
# define ARCH_64BIT 1
|
||||
# else
|
||||
# define ARCH_64BIT 0
|
||||
_Static_assert(sizeof(size_t) == 4, "Data type size mismatch");
|
||||
_Static_assert(sizeof(void*) == 4, "Data type size mismatch");
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if (defined(__GNUC__) || defined(__clang__))
|
||||
#pragma GCC visibility push(default)
|
||||
#endif
|
||||
|
||||
#define USE_IMPLEMENT 1
|
||||
#define USE_INTERPOSE 0
|
||||
#define USE_ALIAS 0
|
||||
|
||||
#if defined(__APPLE__)
|
||||
#undef USE_INTERPOSE
|
||||
#define USE_INTERPOSE 1
|
||||
|
||||
typedef struct interpose_t {
|
||||
void* new_func;
|
||||
void* orig_func;
|
||||
} interpose_t;
|
||||
|
||||
#define MAC_INTERPOSE_PAIR(newf, oldf) { (void*)newf, (void*)oldf }
|
||||
#define MAC_INTERPOSE_SINGLE(newf, oldf) \
|
||||
__attribute__((used)) static const interpose_t macinterpose##newf##oldf \
|
||||
__attribute__ ((section("__DATA, __interpose"))) = MAC_INTERPOSE_PAIR(newf, oldf)
|
||||
|
||||
#endif
|
||||
|
||||
#if !defined(_WIN32) && !defined(__APPLE__)
|
||||
#undef USE_IMPLEMENT
|
||||
#undef USE_ALIAS
|
||||
#define USE_IMPLEMENT 0
|
||||
#define USE_ALIAS 1
|
||||
#endif
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning (disable : 4100)
|
||||
#undef malloc
|
||||
#undef free
|
||||
#undef calloc
|
||||
#define RPMALLOC_RESTRICT __declspec(restrict)
|
||||
#else
|
||||
#define RPMALLOC_RESTRICT
|
||||
#endif
|
||||
|
||||
#if ENABLE_OVERRIDE
|
||||
|
||||
typedef struct rp_nothrow_t { int __dummy; } rp_nothrow_t;
|
||||
|
||||
#if USE_IMPLEMENT
|
||||
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL malloc(size_t size) { return rpmalloc(size); }
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL calloc(size_t count, size_t size) { return rpcalloc(count, size); }
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL realloc(void* ptr, size_t size) { return rprealloc(ptr, size); }
|
||||
extern inline void* RPMALLOC_CDECL reallocf(void* ptr, size_t size) { return rprealloc(ptr, size); }
|
||||
extern inline void* RPMALLOC_CDECL aligned_alloc(size_t alignment, size_t size) { return rpaligned_alloc(alignment, size); }
|
||||
extern inline void* RPMALLOC_CDECL memalign(size_t alignment, size_t size) { return rpmemalign(alignment, size); }
|
||||
extern inline int RPMALLOC_CDECL posix_memalign(void** memptr, size_t alignment, size_t size) { return rpposix_memalign(memptr, alignment, size); }
|
||||
extern inline void RPMALLOC_CDECL free(void* ptr) { rpfree(ptr); }
|
||||
extern inline void RPMALLOC_CDECL cfree(void* ptr) { rpfree(ptr); }
|
||||
extern inline size_t RPMALLOC_CDECL malloc_usable_size(void* ptr) { return rpmalloc_usable_size(ptr); }
|
||||
extern inline size_t RPMALLOC_CDECL malloc_size(void* ptr) { return rpmalloc_usable_size(ptr); }
|
||||
|
||||
#ifdef _WIN32
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL _malloc_base(size_t size) { return rpmalloc(size); }
|
||||
extern inline void RPMALLOC_CDECL _free_base(void* ptr) { rpfree(ptr); }
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL _calloc_base(size_t count, size_t size) { return rpcalloc(count, size); }
|
||||
extern inline size_t RPMALLOC_CDECL _msize(void* ptr) { return rpmalloc_usable_size(ptr); }
|
||||
extern inline size_t RPMALLOC_CDECL _msize_base(void* ptr) { return rpmalloc_usable_size(ptr); }
|
||||
extern inline RPMALLOC_RESTRICT void* RPMALLOC_CDECL _realloc_base(void* ptr, size_t size) { return rprealloc(ptr, size); }
|
||||
#endif
|
||||
|
||||
#ifdef _WIN32
|
||||
// For Windows, #include <rpnew.h> in one source file to get the C++ operator overrides implemented in your module
|
||||
#else
|
||||
// Overload the C++ operators using the mangled names (https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling)
|
||||
// operators delete and delete[]
|
||||
#define RPDEFVIS __attribute__((visibility("default")))
|
||||
extern void _ZdlPv(void* p); void RPDEFVIS _ZdlPv(void* p) { rpfree(p); }
|
||||
extern void _ZdaPv(void* p); void RPDEFVIS _ZdaPv(void* p) { rpfree(p); }
|
||||
#if ARCH_64BIT
|
||||
// 64-bit operators new and new[], normal and aligned
|
||||
extern void* _Znwm(uint64_t size); void* RPDEFVIS _Znwm(uint64_t size) { return rpmalloc(size); }
|
||||
extern void* _Znam(uint64_t size); void* RPDEFVIS _Znam(uint64_t size) { return rpmalloc(size); }
|
||||
extern void* _Znwmm(uint64_t size, uint64_t align); void* RPDEFVIS _Znwmm(uint64_t size, uint64_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _Znamm(uint64_t size, uint64_t align); void* RPDEFVIS _Znamm(uint64_t size, uint64_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnwmSt11align_val_t(uint64_t size, uint64_t align); void* RPDEFVIS _ZnwmSt11align_val_t(uint64_t size, uint64_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnamSt11align_val_t(uint64_t size, uint64_t align); void* RPDEFVIS _ZnamSt11align_val_t(uint64_t size, uint64_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnwmRKSt9nothrow_t(uint64_t size, rp_nothrow_t t); void* RPDEFVIS _ZnwmRKSt9nothrow_t(uint64_t size, rp_nothrow_t t) { (void)sizeof(t); return rpmalloc(size); }
|
||||
extern void* _ZnamRKSt9nothrow_t(uint64_t size, rp_nothrow_t t); void* RPDEFVIS _ZnamRKSt9nothrow_t(uint64_t size, rp_nothrow_t t) { (void)sizeof(t); return rpmalloc(size); }
|
||||
extern void* _ZnwmSt11align_val_tRKSt9nothrow_t(uint64_t size, uint64_t align, rp_nothrow_t t); void* RPDEFVIS _ZnwmSt11align_val_tRKSt9nothrow_t(uint64_t size, uint64_t align, rp_nothrow_t t) { (void)sizeof(t); return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnamSt11align_val_tRKSt9nothrow_t(uint64_t size, uint64_t align, rp_nothrow_t t); void* RPDEFVIS _ZnamSt11align_val_tRKSt9nothrow_t(uint64_t size, uint64_t align, rp_nothrow_t t) { (void)sizeof(t); return rpaligned_alloc(align, size); }
|
||||
// 64-bit operators sized delete and delete[], normal and aligned
|
||||
extern void _ZdlPvm(void* p, uint64_t size); void RPDEFVIS _ZdlPvm(void* p, uint64_t size) { rpfree(p); (void)sizeof(size); }
|
||||
extern void _ZdaPvm(void* p, uint64_t size); void RPDEFVIS _ZdaPvm(void* p, uint64_t size) { rpfree(p); (void)sizeof(size); }
|
||||
extern void _ZdlPvSt11align_val_t(void* p, uint64_t align); void RPDEFVIS _ZdlPvSt11align_val_t(void* p, uint64_t align) { rpfree(p); (void)sizeof(align); }
|
||||
extern void _ZdaPvSt11align_val_t(void* p, uint64_t align); void RPDEFVIS _ZdaPvSt11align_val_t(void* p, uint64_t align) { rpfree(p); (void)sizeof(align); }
|
||||
extern void _ZdlPvmSt11align_val_t(void* p, uint64_t size, uint64_t align); void RPDEFVIS _ZdlPvmSt11align_val_t(void* p, uint64_t size, uint64_t align) { rpfree(p); (void)sizeof(size); (void)sizeof(align); }
|
||||
extern void _ZdaPvmSt11align_val_t(void* p, uint64_t size, uint64_t align); void RPDEFVIS _ZdaPvmSt11align_val_t(void* p, uint64_t size, uint64_t align) { rpfree(p); (void)sizeof(size); (void)sizeof(align); }
|
||||
#else
|
||||
// 32-bit operators new and new[], normal and aligned
|
||||
extern void* _Znwj(uint32_t size); void* RPDEFVIS _Znwj(uint32_t size) { return rpmalloc(size); }
|
||||
extern void* _Znaj(uint32_t size); void* RPDEFVIS _Znaj(uint32_t size) { return rpmalloc(size); }
|
||||
extern void* _Znwjj(uint32_t size, uint32_t align); void* RPDEFVIS _Znwjj(uint32_t size, uint32_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _Znajj(uint32_t size, uint32_t align); void* RPDEFVIS _Znajj(uint32_t size, uint32_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnwjSt11align_val_t(size_t size, size_t align); void* RPDEFVIS _ZnwjSt11align_val_t(size_t size, size_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnajSt11align_val_t(size_t size, size_t align); void* RPDEFVIS _ZnajSt11align_val_t(size_t size, size_t align) { return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnwjRKSt9nothrow_t(size_t size, rp_nothrow_t t); void* RPDEFVIS _ZnwjRKSt9nothrow_t(size_t size, rp_nothrow_t t) { (void)sizeof(t); return rpmalloc(size); }
|
||||
extern void* _ZnajRKSt9nothrow_t(size_t size, rp_nothrow_t t); void* RPDEFVIS _ZnajRKSt9nothrow_t(size_t size, rp_nothrow_t t) { (void)sizeof(t); return rpmalloc(size); }
|
||||
extern void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t); void* RPDEFVIS _ZnwjSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) { (void)sizeof(t); return rpaligned_alloc(align, size); }
|
||||
extern void* _ZnajSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t); void* RPDEFVIS _ZnajSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) { (void)sizeof(t); return rpaligned_alloc(align, size); }
|
||||
// 32-bit operators sized delete and delete[], normal and aligned
|
||||
extern void _ZdlPvj(void* p, uint64_t size); void RPDEFVIS _ZdlPvj(void* p, uint64_t size) { rpfree(p); (void)sizeof(size); }
extern void _ZdaPvj(void* p, uint64_t size); void RPDEFVIS _ZdaPvj(void* p, uint64_t size) { rpfree(p); (void)sizeof(size); }
extern void _ZdlPvSt11align_val_t(void* p, uint32_t align); void RPDEFVIS _ZdlPvSt11align_val_t(void* p, uint32_t align) { rpfree(p); (void)sizeof(align); }
extern void _ZdaPvSt11align_val_t(void* p, uint32_t align); void RPDEFVIS _ZdaPvSt11align_val_t(void* p, uint32_t align) { rpfree(p); (void)sizeof(align); }
extern void _ZdlPvjSt11align_val_t(void* p, uint32_t size, uint32_t align); void RPDEFVIS _ZdlPvjSt11align_val_t(void* p, uint32_t size, uint32_t align) { rpfree(p); (void)sizeof(size); (void)sizeof(align); }
extern void _ZdaPvjSt11align_val_t(void* p, uint32_t size, uint32_t align); void RPDEFVIS _ZdaPvjSt11align_val_t(void* p, uint32_t size, uint32_t align) { rpfree(p); (void)sizeof(size); (void)sizeof(align); }
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if USE_INTERPOSE || USE_ALIAS
|
||||
|
||||
static void* rpmalloc_nothrow(size_t size, rp_nothrow_t t) { (void)sizeof(t); return rpmalloc(size); }
|
||||
static void* rpaligned_alloc_reverse(size_t size, size_t align) { return rpaligned_alloc(align, size); }
|
||||
static void* rpaligned_alloc_reverse_nothrow(size_t size, size_t align, rp_nothrow_t t) { (void)sizeof(t); return rpaligned_alloc(align, size); }
|
||||
static void rpfree_size(void* p, size_t size) { (void)sizeof(size); rpfree(p); }
|
||||
static void rpfree_aligned(void* p, size_t align) { (void)sizeof(align); rpfree(p); }
|
||||
static void rpfree_size_aligned(void* p, size_t size, size_t align) { (void)sizeof(size); (void)sizeof(align); rpfree(p); }
|
||||
|
||||
#endif
|
||||
|
||||
#if USE_INTERPOSE
|
||||
|
||||
__attribute__((used)) static const interpose_t macinterpose_malloc[]
|
||||
__attribute__ ((section("__DATA, __interpose"))) = {
|
||||
//new and new[]
|
||||
MAC_INTERPOSE_PAIR(rpmalloc, _Znwm),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc, _Znam),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse, _Znwmm),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse, _Znamm),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc_nothrow, _ZnwmRKSt9nothrow_t),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc_nothrow, _ZnamRKSt9nothrow_t),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse, _ZnwmSt11align_val_t),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse, _ZnamSt11align_val_t),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse_nothrow, _ZnwmSt11align_val_tRKSt9nothrow_t),
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc_reverse_nothrow, _ZnamSt11align_val_tRKSt9nothrow_t),
|
||||
//delete and delete[]
|
||||
MAC_INTERPOSE_PAIR(rpfree, _ZdlPv),
|
||||
MAC_INTERPOSE_PAIR(rpfree, _ZdaPv),
|
||||
MAC_INTERPOSE_PAIR(rpfree_size, _ZdlPvm),
|
||||
MAC_INTERPOSE_PAIR(rpfree_size, _ZdaPvm),
|
||||
MAC_INTERPOSE_PAIR(rpfree_aligned, _ZdlPvSt11align_val_t),
|
||||
MAC_INTERPOSE_PAIR(rpfree_aligned, _ZdaPvSt11align_val_t),
|
||||
MAC_INTERPOSE_PAIR(rpfree_size_aligned, _ZdlPvmSt11align_val_t),
|
||||
MAC_INTERPOSE_PAIR(rpfree_size_aligned, _ZdaPvmSt11align_val_t),
|
||||
//libc entry points
|
||||
MAC_INTERPOSE_PAIR(rpmalloc, malloc),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc, calloc),
|
||||
MAC_INTERPOSE_PAIR(rprealloc, realloc),
|
||||
MAC_INTERPOSE_PAIR(rprealloc, reallocf),
|
||||
#if defined(__MAC_10_15) && __MAC_OS_X_VERSION_MIN_REQUIRED >= __MAC_10_15
|
||||
MAC_INTERPOSE_PAIR(rpaligned_alloc, aligned_alloc),
|
||||
#endif
|
||||
MAC_INTERPOSE_PAIR(rpmemalign, memalign),
|
||||
MAC_INTERPOSE_PAIR(rpposix_memalign, posix_memalign),
|
||||
MAC_INTERPOSE_PAIR(rpfree, free),
|
||||
MAC_INTERPOSE_PAIR(rpfree, cfree),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc_usable_size, malloc_usable_size),
|
||||
MAC_INTERPOSE_PAIR(rpmalloc_usable_size, malloc_size)
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#if USE_ALIAS
|
||||
|
||||
#define RPALIAS(fn) __attribute__((alias(#fn), used, visibility("default")));
|
||||
|
||||
// Alias the C++ operators using the mangled names (https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling)
|
||||
|
||||
// operators delete and delete[]
|
||||
void _ZdlPv(void* p) RPALIAS(rpfree)
|
||||
void _ZdaPv(void* p) RPALIAS(rpfree)
|
||||
|
||||
#if ARCH_64BIT
|
||||
// 64-bit operators new and new[], normal and aligned
|
||||
void* _Znwm(uint64_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* _Znam(uint64_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* _Znwmm(uint64_t size, uint64_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _Znamm(uint64_t size, uint64_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnwmSt11align_val_t(size_t size, size_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnamSt11align_val_t(size_t size, size_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnwmRKSt9nothrow_t(size_t size, rp_nothrow_t t) RPALIAS(rpmalloc_nothrow)
|
||||
void* _ZnamRKSt9nothrow_t(size_t size, rp_nothrow_t t) RPALIAS(rpmalloc_nothrow)
|
||||
void* _ZnwmSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) RPALIAS(rpaligned_alloc_reverse_nothrow)
|
||||
void* _ZnamSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) RPALIAS(rpaligned_alloc_reverse_nothrow)
|
||||
// 64-bit operators delete and delete[], sized and aligned
|
||||
void _ZdlPvm(void* p, size_t n) RPALIAS(rpfree_size)
|
||||
void _ZdaPvm(void* p, size_t n) RPALIAS(rpfree_size)
|
||||
void _ZdlPvSt11align_val_t(void* p, size_t a) RPALIAS(rpfree_aligned)
|
||||
void _ZdaPvSt11align_val_t(void* p, size_t a) RPALIAS(rpfree_aligned)
|
||||
void _ZdlPvmSt11align_val_t(void* p, size_t n, size_t a) RPALIAS(rpfree_size_aligned)
|
||||
void _ZdaPvmSt11align_val_t(void* p, size_t n, size_t a) RPALIAS(rpfree_size_aligned)
|
||||
#else
|
||||
// 32-bit operators new and new[], normal and aligned
|
||||
void* _Znwj(uint32_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* _Znaj(uint32_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* _Znwjj(uint32_t size, uint32_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _Znajj(uint32_t size, uint32_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnwjSt11align_val_t(size_t size, size_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnajSt11align_val_t(size_t size, size_t align) RPALIAS(rpaligned_alloc_reverse)
|
||||
void* _ZnwjRKSt9nothrow_t(size_t size, rp_nothrow_t t) RPALIAS(rpmalloc_nothrow)
|
||||
void* _ZnajRKSt9nothrow_t(size_t size, rp_nothrow_t t) RPALIAS(rpmalloc_nothrow)
|
||||
void* _ZnwjSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) RPALIAS(rpaligned_alloc_reverse_nothrow)
|
||||
void* _ZnajSt11align_val_tRKSt9nothrow_t(size_t size, size_t align, rp_nothrow_t t) RPALIAS(rpaligned_alloc_reverse_nothrow)
|
||||
// 32-bit operators delete and delete[], sized and aligned
|
||||
void _ZdlPvj(void* p, size_t n) RPALIAS(rpfree_size)
|
||||
void _ZdaPvj(void* p, size_t n) RPALIAS(rpfree_size)
|
||||
void _ZdlPvSt11align_val_t(void* p, size_t a) RPALIAS(rpfree_aligned)
|
||||
void _ZdaPvSt11align_val_t(void* p, size_t a) RPALIAS(rpfree_aligned)
|
||||
void _ZdlPvjSt11align_val_t(void* p, size_t n, size_t a) RPALIAS(rpfree_size_aligned)
|
||||
void _ZdaPvjSt11align_val_t(void* p, size_t n, size_t a) RPALIAS(rpfree_size_aligned)
|
||||
#endif
|
||||
|
||||
void* malloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* calloc(size_t count, size_t size) RPALIAS(rpcalloc)
|
||||
void* realloc(void* ptr, size_t size) RPALIAS(rprealloc)
|
||||
void* reallocf(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2) RPALIAS(rprealloc)
|
||||
void* aligned_alloc(size_t alignment, size_t size) RPALIAS(rpaligned_alloc)
|
||||
void* memalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2) RPALIAS(rpmemalign)
|
||||
int posix_memalign(void** memptr, size_t alignment, size_t size) RPALIAS(rpposix_memalign)
|
||||
void free(void* ptr) RPALIAS(rpfree)
|
||||
void cfree(void* ptr) RPALIAS(rpfree)
|
||||
#if defined(__ANDROID__) || defined(__FreeBSD__)
|
||||
size_t malloc_usable_size(const void* ptr) RPALIAS(rpmalloc_usable_size)
|
||||
#else
|
||||
size_t malloc_usable_size(void* ptr) RPALIAS(rpmalloc_usable_size)
|
||||
#endif
|
||||
size_t malloc_size(void* ptr) RPALIAS(rpmalloc_usable_size)
|
||||
|
||||
#endif
|
||||
|
||||
static inline size_t
|
||||
_rpmalloc_page_size(void) {
|
||||
return _memory_page_size;
|
||||
}
|
||||
|
||||
extern void* RPMALLOC_CDECL
|
||||
reallocarray(void* ptr, size_t count, size_t size);
|
||||
|
||||
extern void* RPMALLOC_CDECL
|
||||
reallocarray(void* ptr, size_t count, size_t size) {
|
||||
size_t total;
|
||||
#if ENABLE_VALIDATE_ARGS
|
||||
#ifdef _MSC_VER
|
||||
int err = SizeTMult(count, size, &total);
|
||||
if ((err != S_OK) || (total >= MAX_ALLOC_SIZE)) {
|
||||
errno = EINVAL;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
int err = __builtin_umull_overflow(count, size, &total);
|
||||
if (err || (total >= MAX_ALLOC_SIZE)) {
|
||||
errno = EINVAL;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#else
|
||||
total = count * size;
|
||||
#endif
|
||||
return realloc(ptr, total);
|
||||
}
|
||||
|
||||
extern inline void* RPMALLOC_CDECL
|
||||
valloc(size_t size) {
|
||||
get_thread_heap();
|
||||
return rpaligned_alloc(_rpmalloc_page_size(), size);
|
||||
}
|
||||
|
||||
extern inline void* RPMALLOC_CDECL
|
||||
pvalloc(size_t size) {
|
||||
get_thread_heap();
|
||||
const size_t page_size = _rpmalloc_page_size();
|
||||
const size_t aligned_size = ((size + page_size - 1) / page_size) * page_size;
|
||||
#if ENABLE_VALIDATE_ARGS
|
||||
if (aligned_size < size) {
|
||||
errno = EINVAL;
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
return rpaligned_alloc(_rpmalloc_page_size(), aligned_size);
|
||||
}
|
||||
|
||||
#endif // ENABLE_OVERRIDE
|
||||
|
||||
#if ENABLE_PRELOAD
|
||||
|
||||
#ifdef _WIN32
|
||||
|
||||
#if defined(BUILD_DYNAMIC_LINK) && BUILD_DYNAMIC_LINK
|
||||
|
||||
extern __declspec(dllexport) BOOL WINAPI
|
||||
DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved);
|
||||
|
||||
extern __declspec(dllexport) BOOL WINAPI
|
||||
DllMain(HINSTANCE instance, DWORD reason, LPVOID reserved) {
|
||||
(void)sizeof(reserved);
|
||||
(void)sizeof(instance);
|
||||
if (reason == DLL_PROCESS_ATTACH)
|
||||
rpmalloc_initialize();
|
||||
else if (reason == DLL_PROCESS_DETACH)
|
||||
rpmalloc_finalize();
|
||||
else if (reason == DLL_THREAD_ATTACH)
|
||||
rpmalloc_thread_initialize();
|
||||
else if (reason == DLL_THREAD_DETACH)
|
||||
rpmalloc_thread_finalize(1);
|
||||
return TRUE;
|
||||
}
|
||||
|
||||
//end BUILD_DYNAMIC_LINK
|
||||
#else
|
||||
|
||||
extern void
|
||||
_global_rpmalloc_init(void) {
|
||||
rpmalloc_set_main_thread();
|
||||
rpmalloc_initialize();
|
||||
}
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
|
||||
static void __attribute__((constructor))
|
||||
initializer(void) {
|
||||
_global_rpmalloc_init();
|
||||
}
|
||||
|
||||
#elif defined(_MSC_VER)
|
||||
|
||||
static int
|
||||
_global_rpmalloc_xib(void) {
|
||||
_global_rpmalloc_init();
|
||||
return 0;
|
||||
}
|
||||
|
||||
#pragma section(".CRT$XIB",read)
|
||||
__declspec(allocate(".CRT$XIB")) void (*_rpmalloc_module_init)(void) = _global_rpmalloc_xib;
|
||||
#pragma comment(linker, "/include:_rpmalloc_module_init")
|
||||
|
||||
#endif
|
||||
|
||||
//end !BUILD_DYNAMIC_LINK
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
||||
#include <pthread.h>
|
||||
#include <stdlib.h>
|
||||
#include <stdint.h>
|
||||
#include <unistd.h>
|
||||
|
||||
extern void
|
||||
rpmalloc_set_main_thread(void);
|
||||
|
||||
static pthread_key_t destructor_key;
|
||||
|
||||
static void
|
||||
thread_destructor(void*);
|
||||
|
||||
static void __attribute__((constructor))
|
||||
initializer(void) {
|
||||
rpmalloc_set_main_thread();
|
||||
rpmalloc_initialize();
|
||||
pthread_key_create(&destructor_key, thread_destructor);
|
||||
}
|
||||
|
||||
static void __attribute__((destructor))
|
||||
finalizer(void) {
|
||||
rpmalloc_finalize();
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
void* (*real_start)(void*);
|
||||
void* real_arg;
|
||||
} thread_starter_arg;
|
||||
|
||||
static void*
|
||||
thread_starter(void* argptr) {
|
||||
thread_starter_arg* arg = argptr;
|
||||
void* (*real_start)(void*) = arg->real_start;
|
||||
void* real_arg = arg->real_arg;
|
||||
rpmalloc_thread_initialize();
|
||||
rpfree(argptr);
|
||||
pthread_setspecific(destructor_key, (void*)1);
|
||||
return (*real_start)(real_arg);
|
||||
}
|
||||
|
||||
static void
|
||||
thread_destructor(void* value) {
|
||||
(void)sizeof(value);
|
||||
rpmalloc_thread_finalize(1);
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
static int
|
||||
pthread_create_proxy(pthread_t* thread,
|
||||
const pthread_attr_t* attr,
|
||||
void* (*start_routine)(void*),
|
||||
void* arg) {
|
||||
rpmalloc_initialize();
|
||||
thread_starter_arg* starter_arg = rpmalloc(sizeof(thread_starter_arg));
|
||||
starter_arg->real_start = start_routine;
|
||||
starter_arg->real_arg = arg;
|
||||
return pthread_create(thread, attr, thread_starter, starter_arg);
|
||||
}
|
||||
|
||||
MAC_INTERPOSE_SINGLE(pthread_create_proxy, pthread_create);
|
||||
|
||||
#else
|
||||
|
||||
#include <dlfcn.h>
|
||||
|
||||
int
|
||||
pthread_create(pthread_t* thread,
|
||||
const pthread_attr_t* attr,
|
||||
void* (*start_routine)(void*),
|
||||
void* arg) {
|
||||
#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \
|
||||
defined(__APPLE__) || defined(__HAIKU__)
|
||||
char fname[] = "pthread_create";
|
||||
#else
|
||||
char fname[] = "_pthread_create";
|
||||
#endif
|
||||
void* real_pthread_create = dlsym(RTLD_NEXT, fname);
|
||||
rpmalloc_thread_initialize();
|
||||
thread_starter_arg* starter_arg = rpmalloc(sizeof(thread_starter_arg));
|
||||
starter_arg->real_start = start_routine;
|
||||
starter_arg->real_arg = arg;
|
||||
return (*(int (*)(pthread_t*, const pthread_attr_t*, void* (*)(void*), void*))real_pthread_create)(thread, attr, thread_starter, starter_arg);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#if ENABLE_OVERRIDE
|
||||
|
||||
#if defined(__GLIBC__) && defined(__linux__)
|
||||
|
||||
void* __libc_malloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1) RPALIAS(rpmalloc)
|
||||
void* __libc_calloc(size_t count, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2) RPALIAS(rpcalloc)
|
||||
void* __libc_realloc(void* p, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2) RPALIAS(rprealloc)
|
||||
void __libc_free(void* p) RPALIAS(rpfree)
|
||||
void __libc_cfree(void* p) RPALIAS(rpfree)
|
||||
void* __libc_memalign(size_t align, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2) RPALIAS(rpmemalign)
|
||||
int __posix_memalign(void** p, size_t align, size_t size) RPALIAS(rpposix_memalign)
|
||||
|
||||
extern void* __libc_valloc(size_t size);
|
||||
extern void* __libc_pvalloc(size_t size);
|
||||
|
||||
void*
|
||||
__libc_valloc(size_t size) {
|
||||
return valloc(size);
|
||||
}
|
||||
|
||||
void*
|
||||
__libc_pvalloc(size_t size) {
|
||||
return pvalloc(size);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
#if (defined(__GNUC__) || defined(__clang__))
|
||||
#pragma GCC visibility pop
|
||||
#endif
|
source/extern/rpmalloc.c (vendored, new file, 3638 lines); diff not shown because the file is too large.
source/extern/rpmalloc.h (vendored, new file, 373 lines)
@ -0,0 +1,373 @@
|
||||
/* rpmalloc.h - Memory allocator - Public Domain - 2016 Mattias Jansson
|
||||
*
|
||||
* This library provides a cross-platform lock free thread caching malloc implementation in C11.
|
||||
* The latest source code is always available at
|
||||
*
|
||||
* https://github.com/mjansson/rpmalloc
|
||||
*
|
||||
* This library is put in the public domain; you can redistribute it and/or modify it without any restrictions.
|
||||
*
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) || defined(__GNUC__)
|
||||
# define RPMALLOC_EXPORT __attribute__((visibility("default")))
|
||||
# define RPMALLOC_ALLOCATOR
|
||||
# if (defined(__clang_major__) && (__clang_major__ < 4)) || (defined(__GNUC__) && defined(ENABLE_PRELOAD) && ENABLE_PRELOAD)
|
||||
# define RPMALLOC_ATTRIB_MALLOC
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size)
|
||||
# else
|
||||
# define RPMALLOC_ATTRIB_MALLOC __attribute__((__malloc__))
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size) __attribute__((alloc_size(size)))
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count, size) __attribute__((alloc_size(count, size)))
|
||||
# endif
|
||||
# define RPMALLOC_CDECL
|
||||
#elif defined(_MSC_VER)
|
||||
# define RPMALLOC_EXPORT
|
||||
# define RPMALLOC_ALLOCATOR __declspec(allocator) __declspec(restrict)
|
||||
# define RPMALLOC_ATTRIB_MALLOC
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
|
||||
# define RPMALLOC_CDECL __cdecl
|
||||
#else
|
||||
# define RPMALLOC_EXPORT
|
||||
# define RPMALLOC_ALLOCATOR
|
||||
# define RPMALLOC_ATTRIB_MALLOC
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE(size)
|
||||
# define RPMALLOC_ATTRIB_ALLOC_SIZE2(count,size)
|
||||
# define RPMALLOC_CDECL
|
||||
#endif
|
||||
|
||||
//! Define RPMALLOC_CONFIGURABLE to enable configuring sizes. Will introduce
|
||||
// a very small overhead due to some size calculations not being compile time constants
|
||||
#ifndef RPMALLOC_CONFIGURABLE
|
||||
#define RPMALLOC_CONFIGURABLE 0
|
||||
#endif
|
||||
|
||||
//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* functions).
|
||||
// Will introduce a very small overhead to track fully allocated spans in heaps
|
||||
#ifndef RPMALLOC_FIRST_CLASS_HEAPS
|
||||
#define RPMALLOC_FIRST_CLASS_HEAPS 0
|
||||
#endif
|
||||
|
||||
//! Flag to rpaligned_realloc to not preserve content in reallocation
|
||||
#define RPMALLOC_NO_PRESERVE 1
|
||||
//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be done in-place,
|
||||
// in which case the original pointer is still valid (just like a call to realloc which failes to allocate
|
||||
// a new block).
|
||||
#define RPMALLOC_GROW_OR_FAIL 2
|
||||
|
||||
typedef struct rpmalloc_global_statistics_t {
|
||||
//! Current amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped;
|
||||
//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped_peak;
|
||||
//! Current amount of memory in global caches for small and medium sizes (<32KiB)
|
||||
size_t cached;
|
||||
//! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
|
||||
size_t huge_alloc;
|
||||
//! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default (only if ENABLE_STATISTICS=1)
|
||||
size_t huge_alloc_peak;
|
||||
//! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
|
||||
size_t mapped_total;
|
||||
//! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
|
||||
size_t unmapped_total;
|
||||
} rpmalloc_global_statistics_t;
|
||||
|
||||
typedef struct rpmalloc_thread_statistics_t {
|
||||
//! Current number of bytes available in thread size class caches for small and medium sizes (<32KiB)
|
||||
size_t sizecache;
|
||||
//! Current number of bytes available in thread span caches for small and medium sizes (<32KiB)
|
||||
size_t spancache;
|
||||
//! Total number of bytes transitioned from thread cache to global cache (only if ENABLE_STATISTICS=1)
|
||||
size_t thread_to_global;
|
||||
//! Total number of bytes transitioned from global cache to thread cache (only if ENABLE_STATISTICS=1)
|
||||
size_t global_to_thread;
|
||||
//! Per span count statistics (only if ENABLE_STATISTICS=1)
|
||||
struct {
|
||||
//! Currently used number of spans
|
||||
size_t current;
|
||||
//! High water mark of spans used
|
||||
size_t peak;
|
||||
//! Number of spans transitioned to global cache
|
||||
size_t to_global;
|
||||
//! Number of spans transitioned from global cache
|
||||
size_t from_global;
|
||||
//! Number of spans transitioned to thread cache
|
||||
size_t to_cache;
|
||||
//! Number of spans transitioned from thread cache
|
||||
size_t from_cache;
|
||||
//! Number of spans transitioned to reserved state
|
||||
size_t to_reserved;
|
||||
//! Number of spans transitioned from reserved state
|
||||
size_t from_reserved;
|
||||
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
|
||||
size_t map_calls;
|
||||
} span_use[64];
|
||||
//! Per size class statistics (only if ENABLE_STATISTICS=1)
|
||||
struct {
|
||||
//! Current number of allocations
|
||||
size_t alloc_current;
|
||||
//! Peak number of allocations
|
||||
size_t alloc_peak;
|
||||
//! Total number of allocations
|
||||
size_t alloc_total;
|
||||
//! Total number of frees
|
||||
size_t free_total;
|
||||
//! Number of spans transitioned to cache
|
||||
size_t spans_to_cache;
|
||||
//! Number of spans transitioned from cache
|
||||
size_t spans_from_cache;
|
||||
//! Number of spans transitioned from reserved state
|
||||
size_t spans_from_reserved;
|
||||
//! Number of raw memory map calls (not hitting the reserve spans but resulting in actual OS mmap calls)
|
||||
size_t map_calls;
|
||||
} size_use[128];
|
||||
} rpmalloc_thread_statistics_t;
|
||||
|
||||
typedef struct rpmalloc_config_t {
|
||||
//! Map memory pages for the given number of bytes. The returned address MUST be
|
||||
// aligned to the rpmalloc span size, which will always be a power of two.
|
||||
// Optionally the function can store an alignment offset in the offset variable
|
||||
// in case it performs alignment and the returned pointer is offset from the
|
||||
// actual start of the memory region due to this alignment. The alignment offset
|
||||
// will be passed to the memory unmap function. The alignment offset MUST NOT be
|
||||
// larger than 65535 (storable in an uint16_t), if it is you must use natural
|
||||
// alignment to shift it into 16 bits. If you set a memory_map function, you
|
||||
// must also set a memory_unmap function or else the default implementation will
|
||||
// be used for both. This function must be thread safe, it can be called by
|
||||
// multiple threads simultaneously.
|
||||
void* (*memory_map)(size_t size, size_t* offset);
|
||||
//! Unmap the memory pages starting at address and spanning the given number of bytes.
|
||||
// If release is set to non-zero, the unmap is for an entire span range as returned by
|
||||
// a previous call to memory_map and that the entire range should be released. The
|
||||
// release argument holds the size of the entire span range. If release is set to 0,
|
||||
// the unmap is a partial decommit of a subset of the mapped memory range.
|
||||
// If you set a memory_unmap function, you must also set a memory_map function or
|
||||
// else the default implementation will be used for both. This function must be thread
|
||||
// safe, it can be called by multiple threads simultaneously.
|
||||
void (*memory_unmap)(void* address, size_t size, size_t offset, size_t release);
|
||||
//! Called when an assert fails, if asserts are enabled. Will use the standard assert()
|
||||
// if this is not set.
|
||||
void (*error_callback)(const char* message);
|
||||
//! Called when a call to map memory pages fails (out of memory). If this callback is
|
||||
// not set or returns zero the library will return a null pointer in the allocation
|
||||
// call. If this callback returns non-zero the map call will be retried. The argument
|
||||
// passed is the number of bytes that was requested in the map call. Only used if
|
||||
// the default system memory map function is used (memory_map callback is not set).
|
||||
int (*map_fail_callback)(size_t size);
|
||||
//! Size of memory pages. The page size MUST be a power of two. All memory mapping
|
||||
// requests to memory_map will be made with size set to a multiple of the page size.
|
||||
// Used if RPMALLOC_CONFIGURABLE is defined to 1, otherwise system page size is used.
|
||||
size_t page_size;
|
||||
//! Size of a span of memory blocks. MUST be a power of two, and in [4096,262144]
|
||||
// range (unless 0 - set to 0 to use the default span size). Used if RPMALLOC_CONFIGURABLE
|
||||
// is defined to 1.
|
||||
size_t span_size;
|
||||
//! Number of spans to map at each request to map new virtual memory blocks. This can
|
||||
// be used to minimize the system call overhead at the cost of virtual memory address
|
||||
// space. The extra mapped pages will not be written until actually used, so physical
|
||||
// committed memory should not be affected in the default implementation. Will be
|
||||
// aligned to a multiple of spans that match memory page size in case of huge pages.
|
||||
size_t span_map_count;
|
||||
//! Enable use of large/huge pages. If this flag is set to non-zero and page size is
|
||||
// zero, the allocator will try to enable huge pages and auto detect the configuration.
|
||||
// If this is set to non-zero and page_size is also non-zero, the allocator will
|
||||
// assume huge pages have been configured and enabled prior to initializing the
|
||||
// allocator.
|
||||
// For Windows, see https://docs.microsoft.com/en-us/windows/desktop/memory/large-page-support
|
||||
// For Linux, see https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
|
||||
int enable_huge_pages;
|
||||
//! Respectively allocated pages and huge allocated pages names for systems
|
||||
// supporting it to be able to distinguish among anonymous regions.
|
||||
const char *page_name;
|
||||
const char *huge_page_name;
|
||||
} rpmalloc_config_t;
|
||||
|
||||
//! Initialize allocator with default configuration
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_initialize(void);
|
||||
|
||||
//! Initialize allocator with given configuration
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_initialize_config(const rpmalloc_config_t* config);
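A minimal configuration sketch (not part of the commit; only the field and function names declared in this header are real, the rest is illustrative): install an error callback and opt into huge pages before the first allocation.

    // Illustrative sketch only: configure rpmalloc before any allocation is made.
    #include <cstdio>
    #include "rpmalloc.h"

    static void report_alloc_error(const char* message) {
      std::fprintf(stderr, "rpmalloc: %s\n", message);   // error_callback hook declared above
    }

    int main() {
      rpmalloc_config_t config = {};        // zero-initialize: unset fields keep their defaults
      config.error_callback = report_alloc_error;
      config.enable_huge_pages = 1;         // only effective if huge pages are configured on the system
      rpmalloc_initialize_config(&config);

      void* block = rpmalloc(256);
      rpfree(block);

      rpmalloc_finalize();
      return 0;
    }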
|
||||
|
||||
//! Get allocator configuration
|
||||
RPMALLOC_EXPORT const rpmalloc_config_t*
|
||||
rpmalloc_config(void);
|
||||
|
||||
//! Finalize allocator
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_finalize(void);
|
||||
|
||||
//! Initialize allocator for calling thread
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_initialize(void);
|
||||
|
||||
//! Finalize allocator for calling thread
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_finalize(int release_caches);
|
||||
|
||||
//! Perform deferred deallocations pending for the calling thread heap
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_collect(void);
|
||||
|
||||
//! Query if allocator is initialized for calling thread
|
||||
RPMALLOC_EXPORT int
|
||||
rpmalloc_is_thread_initialized(void);
|
||||
|
||||
//! Get per-thread statistics
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_thread_statistics(rpmalloc_thread_statistics_t* stats);
|
||||
|
||||
//! Get global statistics
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats);
|
||||
|
||||
//! Dump all statistics in human readable format to file (should be a FILE*)
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_dump_statistics(void* file);
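For example (a sketch; per the comments above, the detailed counters are only populated in builds with ENABLE_STATISTICS=1, while the always-tracked fields work regardless):

    // Sketch: report allocator statistics for the current process.
    #include <cstdio>
    #include "rpmalloc.h"

    void log_allocator_stats() {
      rpmalloc_global_statistics_t global = {};
      rpmalloc_global_statistics(&global);
      std::printf("rpmalloc cached bytes: %zu\n", global.cached);

      rpmalloc_dump_statistics(stdout);   // expects a FILE*, per the comment above
    }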
|
||||
|
||||
//! Allocate a memory block of at least the given size
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc(size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(1);
|
||||
|
||||
//! Free the given memory block
|
||||
RPMALLOC_EXPORT void
|
||||
rpfree(void* ptr);
|
||||
|
||||
//! Allocate a memory block of at least the given size and zero initialize it
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpcalloc(size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(1, 2);
|
||||
|
||||
//! Reallocate the given block to at least the given size
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rprealloc(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Reallocate the given block to at least the given size and alignment,
|
||||
// with optional control flags (see RPMALLOC_NO_PRESERVE).
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment, and zero initialize it.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpaligned_calloc(size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size and alignment.
|
||||
// Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB)
|
||||
RPMALLOC_EXPORT int
|
||||
rpposix_memalign(void** memptr, size_t alignment, size_t size);
|
||||
|
||||
//! Query the usable size of the given memory block (from given pointer to the end of block)
|
||||
RPMALLOC_EXPORT size_t
|
||||
rpmalloc_usable_size(void* ptr);
|
||||
|
||||
//! Dummy empty function for forcing linker symbol inclusion
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_linker_reference(void);
|
||||
|
||||
#if RPMALLOC_FIRST_CLASS_HEAPS
|
||||
|
||||
//! Heap type
|
||||
typedef struct heap_t rpmalloc_heap_t;
|
||||
|
||||
//! Acquire a new heap. Will reuse existing released heaps or allocate memory for a new heap
|
||||
// if none available. Heap API is implemented with the strict assumption that only one single
|
||||
// thread will call heap functions for a given heap at any given time, no functions are thread safe.
|
||||
RPMALLOC_EXPORT rpmalloc_heap_t*
|
||||
rpmalloc_heap_acquire(void);
|
||||
|
||||
//! Release a heap (does NOT free the memory allocated by the heap, use rpmalloc_heap_free_all before destroying the heap).
|
||||
// Releasing a heap will enable it to be reused by other threads. Safe to pass a null pointer.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_release(rpmalloc_heap_t* heap);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap. The returned
|
||||
// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap and zero initialize it.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Allocate a memory block of at least the given size using the given heap and zero initialize it. The returned
|
||||
// block will have the requested alignment. Alignment must either be zero, or a power of two and a multiple of sizeof(void*),
|
||||
// and should ideally be less than memory page size. A caveat of rpmalloc
|
||||
// internals is that this must also be strictly less than the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
|
||||
|
||||
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
|
||||
// by the same heap given to this function.
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(3);
|
||||
|
||||
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
|
||||
// by the same heap given to this function. The returned block will have the requested alignment.
|
||||
// Alignment must be either zero, or a power of two and a multiple of sizeof(void*), and should ideally be
|
||||
// less than memory page size. A caveat of rpmalloc internals is that this must also be strictly less than
|
||||
// the span size (default 64KiB).
|
||||
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
|
||||
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
|
||||
|
||||
//! Free the given memory block from the given heap. The memory block MUST be allocated
|
||||
// by the same heap given to this function.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr);
|
||||
|
||||
//! Free all memory allocated by the heap
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_free_all(rpmalloc_heap_t* heap);
|
||||
|
||||
//! Set the given heap as the current heap for the calling thread. A heap MUST only be current heap
|
||||
// for a single thread, a heap can never be shared between multiple threads. The previous
|
||||
// current heap for the calling thread is released to be reused by other threads.
|
||||
RPMALLOC_EXPORT void
|
||||
rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap);
|
||||
|
||||
//! Returns which heap the given pointer is allocated on
|
||||
RPMALLOC_EXPORT rpmalloc_heap_t*
|
||||
rpmalloc_get_heap_for_ptr(void* ptr);
|
||||
|
||||
#endif
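A small sketch of the heap API above (only compiled when RPMALLOC_FIRST_CLASS_HEAPS is set to 1; the default earlier in this header leaves it at 0). Per the comments, a heap must only be used from one thread at a time, which makes it a natural per-task arena:

    // Illustrative only: per-task arena using the first-class heap API.
    #include "rpmalloc.h"

    void run_task() {
      rpmalloc_heap_t* heap = rpmalloc_heap_acquire();

      void* a = rpmalloc_heap_alloc(heap, 128);
      void* b = rpmalloc_heap_aligned_alloc(heap, 64, 1024);   // 64-byte aligned block
      (void)a; (void)b;

      rpmalloc_heap_free_all(heap);   // release everything allocated from this heap at once
      rpmalloc_heap_release(heap);    // make the heap reusable by other threads
    }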
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
source/extern/rpnew.h (vendored, new file, 111 lines)
@@ -0,0 +1,111 @@

#ifdef __cplusplus

#include <new>
#include <rpmalloc.h>

#ifndef __CRTDECL
#define __CRTDECL
#endif

extern void __CRTDECL
operator delete(void* p) noexcept {
  rpfree(p);
}

extern void __CRTDECL
operator delete[](void* p) noexcept {
  rpfree(p);
}

extern void* __CRTDECL
operator new(std::size_t size) noexcept(false) {
  return rpmalloc(size);
}

extern void* __CRTDECL
operator new[](std::size_t size) noexcept(false) {
  return rpmalloc(size);
}

extern void* __CRTDECL
operator new(std::size_t size, const std::nothrow_t& tag) noexcept {
  (void)sizeof(tag);
  return rpmalloc(size);
}

extern void* __CRTDECL
operator new[](std::size_t size, const std::nothrow_t& tag) noexcept {
  (void)sizeof(tag);
  return rpmalloc(size);
}

#if (__cplusplus >= 201402L || _MSC_VER >= 1916)

extern void __CRTDECL
operator delete(void* p, std::size_t size) noexcept {
  (void)sizeof(size);
  rpfree(p);
}

extern void __CRTDECL
operator delete[](void* p, std::size_t size) noexcept {
  (void)sizeof(size);
  rpfree(p);
}

#endif

#if (__cplusplus > 201402L || defined(__cpp_aligned_new))

extern void __CRTDECL
operator delete(void* p, std::align_val_t align) noexcept {
  (void)sizeof(align);
  rpfree(p);
}

extern void __CRTDECL
operator delete[](void* p, std::align_val_t align) noexcept {
  (void)sizeof(align);
  rpfree(p);
}

extern void __CRTDECL
operator delete(void* p, std::size_t size, std::align_val_t align) noexcept {
  (void)sizeof(size);
  (void)sizeof(align);
  rpfree(p);
}

extern void __CRTDECL
operator delete[](void* p, std::size_t size, std::align_val_t align) noexcept {
  (void)sizeof(size);
  (void)sizeof(align);
  rpfree(p);
}

extern void* __CRTDECL
operator new(std::size_t size, std::align_val_t align) noexcept(false) {
  return rpaligned_alloc(static_cast<size_t>(align), size);
}

extern void* __CRTDECL
operator new[](std::size_t size, std::align_val_t align) noexcept(false) {
  return rpaligned_alloc(static_cast<size_t>(align), size);
}

extern void* __CRTDECL
operator new(std::size_t size, std::align_val_t align, const std::nothrow_t& tag) noexcept {
  (void)sizeof(tag);
  return rpaligned_alloc(static_cast<size_t>(align), size);
}

extern void* __CRTDECL
operator new[](std::size_t size, std::align_val_t align, const std::nothrow_t& tag) noexcept {
  (void)sizeof(tag);
  return rpaligned_alloc(static_cast<size_t>(align), size);
}

#endif

#endif
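How this header is meant to be consumed (a sketch, not part of the file above; per the note in malloc.c, on Windows it is included in exactly one translation unit of the module, which is what the StarMemory change in this commit does): once included, plain and over-aligned new/delete route to rpmalloc.

    // Illustrative only: with rpnew.h included in one source file of the module,
    // these expressions go through rpmalloc / rpaligned_alloc instead of the CRT heap.
    #include <cstdint>

    struct alignas(64) CacheLine {
      std::uint8_t bytes[64];
    };

    void new_delete_example() {
      int* n = new int(42);            // operator new -> rpmalloc
      CacheLine* c = new CacheLine();  // aligned operator new -> rpaligned_alloc(64, ...)
      delete c;                        // aligned operator delete -> rpfree
      delete n;                        // operator delete -> rpfree
    }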
@@ -8,6 +8,7 @@
     "freetype",
     "libpng",
     "opus",
-    "zstd"
+    "zstd",
+    "mimalloc"
   ]
 }