#include "config.h"
#include "FastMalloc.h"
#ifndef USE_SYSTEM_MALLOC
#define USE_SYSTEM_MALLOC 1
#endif
#if USE_SYSTEM_MALLOC
#include <stdlib.h>
#if !PLATFORM(WIN_OS)
#include <pthread.h>
#endif
namespace WTF {
// System-malloc configuration: fastMalloc is a trivial forwarding
// wrapper around malloc().
void *fastMalloc(size_t n)
{
    void* result = malloc(n);
    return result;
}
// System-malloc configuration: zero-initialized array allocation,
// forwarded to calloc().
void *fastCalloc(size_t n_elements, size_t element_size)
{
    void* result = calloc(n_elements, element_size);
    return result;
}
// Release memory obtained from fastMalloc/fastCalloc/fastRealloc.
// free(NULL) is already a no-op, so the guard merely skips the call.
void fastFree(void* p)
{
    if (p)
        free(p);
}
// System-malloc configuration: resize forwarded to realloc()
// (p == NULL behaves like malloc, as realloc specifies).
void *fastRealloc(void* p, size_t n)
{
    void* result = realloc(p, n);
    return result;
}
#if !PLATFORM(WIN_OS)
// No-op in the system-malloc configuration; exists only so callers can use
// the same API as the TCMalloc configuration below.
void fastMallocRegisterThread(pthread_t)
{
}
#endif
}
#else
#if HAVE(STDINT_H)
#include <stdint.h>
#elif HAVE(INTTYPES_H)
#include <inttypes.h>
#else
#include <sys/types.h>
#endif
#include "AlwaysInline.h"
#include "Assertions.h"
#include "TCPageMap.h"
#include "TCSpinLock.h"
#include "TCSystemAlloc.h"
#include <errno.h>
#include <new>
#include <pthread.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#if WTF_CHANGES
namespace WTF {
#define malloc fastMalloc
#define calloc fastCalloc
#define free fastFree
#define realloc fastRealloc
#define MESSAGE LOG_ERROR
#define CHECK_CONDITION ASSERT
#endif
#if HAVE(INTTYPES_H)
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
#define LLU PRIu64
#else
#define LLU "llu" // hope for the best
#endif
// ---- Configuration constants ----
static const size_t kPageShift = 12;
static const size_t kPageSize = 1 << kPageShift;
// Largest allocation served from the thread caches; bigger requests go
// straight to the page heap.
static const size_t kMaxSize = 8u * kPageSize;
static const size_t kAlignShift = 3;
static const size_t kAlignment = 1 << kAlignShift;
// Number of size classes (class 0 is unused / "not small").
static const size_t kNumClasses = 170;
// Requests at or below this are accounted at the request rounded to 8
// bytes rather than the full class size (see ThreadCache::Allocate).
static const size_t kMaxTinySize = 1 << 8;
// Minimum number of pages to fetch from the system at a time (1 MB).
static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
// Batch size for moving objects between thread and central caches.
static const int kNumObjectsToMove = 32;
// Thread-cache free lists longer than this get trimmed on Deallocate.
static const int kMaxFreeListLength = 256;
// Bounds on one thread cache's free-byte budget, and the default total
// budget shared across all threads.
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
// Spans of fewer than kMaxPages pages live in exact-size free lists.
static const size_t kMaxPages = kMinSystemAlloc;
// Modulus for the sampled-allocation countdown (see PickNextSample).
static const size_t kSampleParameter = 266053;
// ---- Size-class mapping tables, filled in by InitSizeClasses() ----
static const int kSizeBits = 8 * sizeof(size_t);
// For each floor(log2(size)): the base class index and the shift used to
// map a size within that power-of-two bucket to a class.
static unsigned char size_base[kSizeBits];
static unsigned char size_shift[kSizeBits];
// Per-class allocation byte size and span page count.
static size_t class_to_size[kNumClasses];
static size_t class_to_pages[kNumClasses];
// LgFloor(n) = floor(log2(n)).  Precondition: n != 0.
#if PLATFORM(X86) && COMPILER(GCC)
static inline int LgFloor(size_t n) {
int result;
// "bsr" yields the index of the most significant set bit.
__asm__("bsrl %1, %0"
: "=r" (result) : "ro" (n) : "cc" );
return result;
}
#elif PLATFORM(PPC) && COMPILER(GCC)
static inline int LgFloor(size_t n) {
int result;
// Count leading zeros of the 32-bit value, then convert to a bit index.
__asm__ ("{cntlz|cntlzw} %0,%1"
: "=r" (result) : "r" (n));
return 31 - result;
}
#elif PLATFORM(ARM) && COMPILER(GCC)
static inline int LgFloor(size_t n) {
return 31 - __builtin_clz(n);
}
#else
// Portable fallback: binary-search the high bit with shifts 16/8/4/2/1.
// NOTE(review): the shifts cover only 32 bits, so with a 64-bit size_t and
// n >= 2^32 the ASSERT below would fire -- presumably all callers pass
// sizes < 4GB; verify before using this path on 64-bit targets.
static inline int LgFloor(size_t n) {
int log = 0;
for (int i = 4; i >= 0; --i) {
int shift = (1 << i);
size_t x = n >> shift;
if (x != 0) {
n = x;
log += shift;
}
}
ASSERT(n == 1);
return log;
}
#endif
// Map a requested byte size to its size-class index using the tables
// built by InitSizeClasses().
static inline int SizeClass(size_t size) {
  const size_t effective = (size == 0) ? 1 : size;
  const int lg = LgFloor(effective);
  const int shift = size_shift[lg];
  const size_t index = (effective - 1) >> shift;
  return static_cast<int>(size_base[lg]) + static_cast<int>(index);
}
// Inverse of SizeClass(): the allocation byte size for a size class.
static inline size_t ByteSizeForClass(size_t cl) {
return class_to_size[cl];
}
// Build the size-class mapping tables (size_base/size_shift/class_to_size/
// class_to_pages).  Must run once, before any SizeClass() lookup; callers
// hold pageheap_lock.  Aborts on any internal inconsistency.
static void InitSizeClasses() {
  // Sizes below kAlignment all map through lg in [0, kAlignShift).
  for (size_t lg = 0; lg < kAlignShift; lg++) {
    size_base[lg] = 1;
    size_shift[lg] = kAlignShift;
  }
  // Walk candidate allocation sizes, assigning consecutive class numbers.
  // The spacing between classes (1 << alignshift) is coarsened as sizes
  // grow so worst-case internal fragmentation stays bounded.
  size_t next_class = 1;
  int alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Entered a new power-of-two bucket: possibly coarsen the alignment
      // and record how sizes in this bucket map to class numbers.
      if ((lg >= 8) && (alignshift < 9)) {
        alignshift++;
      }
      size_base[lg] = next_class - ((size-1) >> alignshift);
      size_shift[lg] = alignshift;
    }
    // Bounds-check BEFORE the write: previously this was only verified
    // after the loop, i.e. after class_to_size[] could already have been
    // overrun.
    if (next_class >= kNumClasses) {
      MESSAGE("used up too many size classes: %d\n",
              static_cast<int>(next_class));
      abort();
    }
    class_to_size[next_class] = size;
    last_lg = lg;
    next_class++;
  }
  // Keep the original post-loop check too (counter may hit the limit on
  // the final increment).  Cast: %d expects int, next_class is size_t.
  if (next_class >= kNumClasses) {
    MESSAGE("used up too many size classes: %d\n",
            static_cast<int>(next_class));
    abort();
  }
  // For each class, pick a span size (in pages) such that the unusable
  // tail is at most 1/16 of the span.
  for (size_t cl = 1; cl < next_class; cl++) {
    size_t psize = kPageSize;
    const size_t s = class_to_size[cl];
    while ((psize % s) > (psize >> 4)) {
      psize += kPageSize;
    }
    class_to_pages[cl] = psize >> kPageShift;
  }
  // Double-check the SizeClass() mapping for every small size.
  // (Casts: %d expects int; sc is size_t.)
  for (size_t size = 0; size <= kMaxSize; size++) {
    const size_t sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %d for %" PRIuS "\n",
              static_cast<int>(sc), size);
      abort();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %d for %" PRIuS
              "\n", static_cast<int>(sc), size);
      abort();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %d for %" PRIuS "\n",
              static_cast<int>(sc), size);
      abort();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n",
              s, size, static_cast<int>(sc));
      abort();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n",
              s, size, static_cast<int>(sc));
      abort();
    }
  }
}
// Bytes of allocator-internal metadata obtained from the system so far.
static uint64_t metadata_system_bytes = 0;
// Fetch memory for allocator metadata straight from the system so that
// metadata allocation never re-enters the allocator itself.
static void* MetaDataAlloc(size_t bytes) {
void* result = TCMalloc_SystemAlloc(bytes);
if (result != NULL) {
metadata_system_bytes += bytes;
}
return result;
}
// Fixed-size allocator for internal metadata objects (Span, thread caches,
// stack-trace records).  Carves objects out of kAllocIncrement-byte chunks
// obtained from MetaDataAlloc and recycles freed objects on an intrusive
// free list.  Memory is never returned to the system.  Not thread-safe;
// callers provide locking.
template <class T>
class PageHeapAllocator {
private:
// How much to grab from MetaDataAlloc at a time.
static const int kAllocIncrement = 32 << 10;
// sizeof(T) rounded up to the allocator alignment.
static const size_t kAlignedSize
= (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
// Unused portion of the most recently fetched chunk.
char* free_area_;
size_t free_avail_;
// Intrusive singly-linked free list; the link is stored in the freed
// object's first word.
void* free_list_;
// Number of live objects (statistics only).
int inuse_;
public:
void Init() {
ASSERT(kAlignedSize <= kAllocIncrement);
inuse_ = 0;
free_area_ = NULL;
free_avail_ = 0;
free_list_ = NULL;
}
T* New() {
// Prefer a recycled object; otherwise bump-allocate from the current
// chunk, fetching a fresh chunk when the remainder is too small (the
// tail of the old chunk, if any, is abandoned).
void* result;
if (free_list_ != NULL) {
result = free_list_;
free_list_ = *(reinterpret_cast<void**>(result));
} else {
if (free_avail_ < kAlignedSize) {
free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
if (free_area_ == NULL) abort();
free_avail_ = kAllocIncrement;
}
result = free_area_;
free_area_ += kAlignedSize;
free_avail_ -= kAlignedSize;
}
inuse_++;
return reinterpret_cast<T*>(result);
}
void Delete(T* p) {
// Push onto the free list, reusing the object's storage for the link.
*(reinterpret_cast<void**>(p)) = free_list_;
free_list_ = p;
inuse_--;
}
int inuse() const { return inuse_; }
};
// A PageID is an address right-shifted by kPageShift; a Length is a count
// of pages.
typedef uintptr_t PageID;
typedef uintptr_t Length;
// Convert a byte count to a page count, rounding up.
static inline Length pages(size_t bytes) {
return ((bytes + kPageSize - 1) >> kPageShift);
}
// Number of bytes actually handed out for a request of the given size:
// whole pages for large requests, the size-class size otherwise.
static size_t AllocationSize(size_t bytes) {
  return (bytes > kMaxSize) ? (pages(bytes) << kPageShift)
                            : ByteSizeForClass(SizeClass(bytes));
}
// Descriptor for a contiguous run of pages.  Free spans sit on the page
// heap's lists; allocated spans either back a size class (sizeclass != 0,
// with objects/refcount tracking the carved sub-objects) or one large
// allocation.  next/prev are the intrusive list links.
struct Span {
PageID start; Length length; Span* next; Span* prev; void* objects; unsigned int free : 1; unsigned int sample : 1; unsigned int sizeclass : 8; unsigned int refcount : 11;
#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
// Ring buffer of recent events for debugging (see Event()).
int nexthistory;
char history[64];
int value[64];
#endif
};
// Record an event in the span's debug history; compiles away to a no-op
// unless SPAN_HISTORY is defined above.
#ifdef SPAN_HISTORY
void Event(Span* span, char op, int v = 0) {
span->history[span->nexthistory] = op;
span->value[span->nexthistory] = v;
span->nexthistory++;
if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
#define Event(s,o,v) ((void) 0)
#endif
// Allocator for Span descriptors.
static PageHeapAllocator<Span> span_allocator;
// Create a zero-initialized Span covering pages [p, p+len).
static Span* NewSpan(PageID p, Length len) {
Span* result = span_allocator.New();
memset(result, 0, sizeof(*result));
result->start = p;
result->length = len;
#ifdef SPAN_HISTORY
result->nexthistory = 0;
#endif
return result;
}
static inline void DeleteSpan(Span* span) {
#ifndef NDEBUG
// Poison the span in debug builds to catch use-after-free.
memset(span, 0x3f, sizeof(*span));
#endif
span_allocator.Delete(span);
}
// Doubly-linked circular list helpers.  A list is a dummy header Span
// whose next/prev point to itself when the list is empty.
static inline void DLL_Init(Span* list) {
list->next = list;
list->prev = list;
}
// Unlink span from its list and clear its links.
static inline void DLL_Remove(Span* span) {
span->prev->next = span->next;
span->next->prev = span->prev;
span->prev = NULL;
span->next = NULL;
}
static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
return list->next == list;
}
#ifndef WTF_CHANGES
// Count list elements (statistics builds only; O(n)).
static int DLL_Length(const Span* list) {
int result = 0;
for (Span* s = list->next; s != list; s = s->next) {
result++;
}
return result;
}
#endif
#if 0
// Disabled debugging aid: print a span list.
static void DLL_Print(const char* label, const Span* list) {
MESSAGE("%-10s %p:", label, list);
for (const Span* s = list->next; s != list; s = s->next) {
MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
}
MESSAGE("\n");
}
#endif
// Push span onto the front of list (O(1)); span must be unlinked.
static inline void DLL_Prepend(Span* list, Span* span) {
  ASSERT(span->next == NULL);
  ASSERT(span->prev == NULL);
  Span* const old_head = list->next;
  span->prev = list;
  span->next = old_head;
  old_head->prev = span;
  list->next = span;
}
// Insert span keeping the list sorted by start address (O(n); used for
// the page heap's address-ordered "large" list).  span must be unlinked.
static void DLL_InsertOrdered(Span* list, Span* span) {
ASSERT(span->next == NULL);
ASSERT(span->prev == NULL);
// Find the last element whose start precedes span's start.
Span* x = list;
while ((x->next != list) && (x->next->start < span->start)) {
x = x->next;
}
span->next = x->next;
span->prev = x;
x->next->prev = span;
x->next = span;
}
// Captured allocation stack for sampled allocations.
static const int kMaxStackDepth = 31;
struct StackTrace {
uintptr_t size; int depth; void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;
// List header for spans that hold sampled allocations.
static Span sampled_objects;
// Select a pagemap representation by pointer width: three-level radix map
// in general, two-level for 32-bit address spaces.
template <int BITS> class MapSelector {
public:
typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
};
template <> class MapSelector<32> {
public:
typedef TCMalloc_PageMap2<32-kPageShift> Type;
};
// The central page-level heap.  Hands out Spans of one or more pages,
// coalesces freed spans with their neighbors, and keeps free spans in
// exact-size lists (< kMaxPages pages) plus one address-ordered "large"
// list.  Callers hold pageheap_lock.
class TCMalloc_PageHeap {
public:
TCMalloc_PageHeap();
// Allocate a span of n pages, growing the heap if needed; NULL on failure.
Span* New(Length n);
// Return a span to the free pool, coalescing with free neighbors.
void Delete(Span* span);
// Mark span as backing size class sc and index all its pages.
void RegisterSizeClass(Span* span, size_t sc);
// Split the first n pages off span; returns the leftover span.
Span* Split(Span* span, Length n);
// Map a page to its owning span (NULL if unknown).
inline Span* GetDescriptor(PageID p) const {
return reinterpret_cast<Span*>(pagemap_.get(p));
}
#ifndef WTF_CHANGES
void Dump(TCMalloc_Printer* out);
#endif
inline uint64_t SystemBytes() const { return system_bytes_; }
uint64_t FreeBytes() const {
return (static_cast<uint64_t>(free_pages_) << kPageShift);
}
// Debug consistency checks over all free lists.
bool Check();
bool CheckList(Span* list, Length min_pages, Length max_pages);
private:
// Page -> Span index, shaped by the pointer width.
typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
PageMap pagemap_;
// Free spans of >= kMaxPages pages, ordered by start address.
Span large_;
// free_[n] holds free spans of exactly n pages.
Span free_[kMaxPages];
// Total pages across all free lists.
uintptr_t free_pages_;
// Total bytes obtained from the system.
uint64_t system_bytes_;
bool GrowHeap(Length n);
void Carve(Span* span, Length n);
// Index a span's first and last page (enough for the neighbor probes
// done by Delete()'s coalescing).
void RecordSpan(Span* span) {
pagemap_.set(span->start, span);
if (span->length > 1) {
pagemap_.set(span->start + span->length - 1, span);
}
}
};
// Construct an empty page heap; pagemap_ gets its metadata through
// MetaDataAlloc.
TCMalloc_PageHeap::TCMalloc_PageHeap() : pagemap_(MetaDataAlloc),
free_pages_(0),
system_bytes_(0) {
// Start with every free list empty.
DLL_Init(&large_);
for (size_t i = 0; i < kMaxPages; i++) {
DLL_Init(&free_[i]);
}
}
// Allocate a run of n pages: first-fit from the exact-size lists, then
// best-fit from the large list; on failure grow the heap once and retry
// the large-list search.
inline Span* TCMalloc_PageHeap::New(Length n) {
ASSERT(Check());
if (n == 0) n = 1;
// First pass: smallest exact-size list that can satisfy the request.
for (size_t s = n; s < kMaxPages; s++) {
if (!DLL_IsEmpty(&free_[s])) {
Span* result = free_[s].next;
Carve(result, n);
ASSERT(Check());
free_pages_ -= n;
return result;
}
}
// Two passes over the large list: before and after growing the heap.
// NOTE(review): if GrowHeap's fallback acquires only n < kMaxPages pages,
// the new span is filed in free_[n], which this retry does not rescan --
// verify whether that case can occur in practice.
for (int i = 0; i < 2; i++) {
// Best fit: the smallest large span that is big enough.
Span *best = NULL;
for (Span* span = large_.next; span != &large_; span = span->next) {
if (span->length >= n &&
(best == NULL || span->length < best->length)) {
best = span;
}
}
if (best != NULL) {
Carve(best, n);
ASSERT(Check());
free_pages_ -= n;
return best;
}
if (i == 0) {
if (!GrowHeap(n)) {
ASSERT(Check());
return NULL;
}
}
}
return NULL;
}
// Split the first n pages off of *span, which must be an in-use span of
// more than n pages with no size class assigned.  *span is shrunk to n
// pages in place; the remainder is returned as a new (in-use) Span.
Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(!span->free);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);
  // Use Length, not int: page counts are pointer-sized, and a 32-bit int
  // would silently truncate very large spans on 64-bit targets.
  const Length extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  // Re-index the shrunken span's new last page so pointer->Span lookups
  // stay correct, then adjust its length.
  pagemap_.set(span->start + n - 1, span);
  span->length = n;
  return leftover;
}
// Remove span from its free list, mark the first n pages in use, and file
// any leftover tail back into the appropriate free list.
inline void TCMalloc_PageHeap::Carve(Span* span, Length n) {
  ASSERT(n > 0);
  // Guard against unsigned underflow in the subtraction below.  (The old
  // "ASSERT(extra >= 0)" was a tautology: extra is unsigned.)
  ASSERT(span->length >= n);
  DLL_Remove(span);
  span->free = 0;
  Event(span, 'A', n);
  const size_t extra = span->length - n;
  if (extra > 0) {
    // Give the tail its own descriptor and put it back on a free list.
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->free = 1;
    Event(leftover, 'S', extra);
    RecordSpan(leftover);
    if (extra < kMaxPages) {
      DLL_Prepend(&free_[extra], leftover);
    } else {
      DLL_InsertOrdered(&large_, leftover);
    }
    // Shrink span and re-index its new last page.
    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
}
// Return span to the free pool, coalescing with adjacent free spans on
// both sides before filing it on a free list.
inline void TCMalloc_PageHeap::Delete(Span* span) {
ASSERT(Check());
ASSERT(!span->free);
ASSERT(span->length > 0);
ASSERT(GetDescriptor(span->start) == span);
ASSERT(GetDescriptor(span->start + span->length - 1) == span);
span->sizeclass = 0;
span->sample = 0;
const PageID p = span->start;
const Length n = span->length;
// Merge with the free span (if any) ending immediately before ours.
Span* prev = GetDescriptor(p-1);
if (prev != NULL && prev->free) {
ASSERT(prev->start + prev->length == p);
const Length len = prev->length;
DLL_Remove(prev);
DeleteSpan(prev);
span->start -= len;
span->length += len;
pagemap_.set(span->start, span);
Event(span, 'L', len);
}
// Merge with the free span (if any) starting immediately after ours.
Span* next = GetDescriptor(p+n);
if (next != NULL && next->free) {
ASSERT(next->start == p+n);
const Length len = next->length;
DLL_Remove(next);
DeleteSpan(next);
span->length += len;
pagemap_.set(span->start + span->length - 1, span);
Event(span, 'R', len);
}
Event(span, 'D', span->length);
span->free = 1;
// File by size: exact-size list for small spans, ordered large list
// otherwise.
if (span->length < kMaxPages) {
DLL_Prepend(&free_[span->length], span);
} else {
DLL_InsertOrdered(&large_, span);
}
// Only the original n pages are newly free; merged neighbors were
// already counted in free_pages_.
free_pages_ += n;
ASSERT(Check());
}
// Associate span with size class sc and make every interior page map back
// to the span (RecordSpan only indexed the first and last page).
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
ASSERT(!span->free);
ASSERT(GetDescriptor(span->start) == span);
ASSERT(GetDescriptor(span->start+span->length-1) == span);
Event(span, 'C', sc);
span->sizeclass = sc;
for (Length i = 1; i < span->length-1; i++) {
pagemap_.set(span->start+i, span);
}
}
#ifndef WTF_CHANGES
// Human-readable dump of free-list occupancy (statistics builds only).
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
int nonempty_sizes = 0;
for (int s = 0; s < kMaxPages; s++) {
if (!DLL_IsEmpty(&free_[s])) nonempty_sizes++;
}
out->printf("------------------------------------------------\n");
out->printf("PageHeap: %d sizes; %6.1f MB free\n", nonempty_sizes,
(static_cast<double>(free_pages_) * kPageSize) / 1048576.0);
out->printf("------------------------------------------------\n");
uint64_t cumulative = 0;
// One line per non-empty exact-size list.
for (int s = 0; s < kMaxPages; s++) {
if (!DLL_IsEmpty(&free_[s])) {
const int list_length = DLL_Length(&free_[s]);
uint64_t s_pages = s * list_length;
cumulative += s_pages;
out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
s, list_length,
(s_pages << kPageShift) / 1048576.0,
(cumulative << kPageShift) / 1048576.0);
}
}
// Then the large list, one line per span plus a summary.
uint64_t large_pages = 0;
int large_spans = 0;
for (Span* s = large_.next; s != &large_; s = s->next) {
out->printf(" [ %6" PRIuS " spans ]\n", s->length);
large_pages += s->length;
large_spans++;
}
cumulative += large_pages;
out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
large_spans,
(large_pages << kPageShift) / 1048576.0,
(cumulative << kPageShift) / 1048576.0);
}
#endif
// Obtain at least n pages from the system (preferring kMinSystemAlloc to
// amortize system calls) and donate them to the free lists via Delete().
// Returns false if memory could not be obtained or indexed.
bool TCMalloc_PageHeap::GrowHeap(Length n) {
ASSERT(kMaxPages >= kMinSystemAlloc);
Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
if (ptr == NULL) {
if (n < ask) {
// Retry with the minimum we actually need.
ask = n;
ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
}
if (ptr == NULL) return false;
}
system_bytes_ += (ask << kPageShift);
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
ASSERT(p > 0);
// Reserve pagemap slots for the new range plus one page on each side so
// Delete()'s neighbor probes (p-1 and p+n) are always in-bounds.
if (pagemap_.Ensure(p-1, ask+2)) {
Span* span = NewSpan(p, ask);
RecordSpan(span);
Delete(span);
ASSERT(Check());
return true;
} else {
// Pagemap metadata allocation failed; the freshly acquired system
// memory is not returned here.
return false;
}
}
// Validate every free list.  CheckList is compiled to a no-op when
// assertions are disabled.
bool TCMalloc_PageHeap::Check() {
ASSERT(free_[0].next == &free_[0]);
CheckList(&large_, kMaxPages, 1000000000);
for (Length s = 1; s < kMaxPages; s++) {
CheckList(&free_[s], s, s);
}
return true;
}
#if ASSERT_DISABLED
bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
return true;
}
#else
// Each span on a free list must be marked free, have a length in
// [min_pages, max_pages], and be indexed by its first and last page.
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
for (Span* s = list->next; s != list; s = s->next) {
CHECK_CONDITION(s->free);
CHECK_CONDITION(s->length >= min_pages);
CHECK_CONDITION(s->length <= max_pages);
CHECK_CONDITION(GetDescriptor(s->start) == s);
CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
}
return true;
}
#endif
// Per-size-class free list inside a thread cache: a singly-linked LIFO
// stack of objects with a length counter and a low-water mark that
// Scavenge() uses to decide how much to return to the central cache.
class TCMalloc_ThreadCache_FreeList {
private:
// Head of the list, current length, and lowest length seen since the
// last clear_lowwatermark().
void* list_; uint16_t length_; uint16_t lowater_;
public:
void Init() {
list_ = NULL;
length_ = 0;
lowater_ = 0;
}
int length() const {
return length_;
}
bool empty() const {
return list_ == NULL;
}
int lowwatermark() const { return lowater_; }
void clear_lowwatermark() { lowater_ = length_; }
ALWAYS_INLINE void Push(void* ptr) {
// The link is stored in the object's first word.
*(reinterpret_cast<void**>(ptr)) = list_;
list_ = ptr;
length_++;
}
ALWAYS_INLINE void* Pop() {
ASSERT(list_ != NULL);
void* result = list_;
list_ = *(reinterpret_cast<void**>(result));
length_--;
if (length_ < lowater_) lowater_ = length_;
return result;
}
};
// Per-thread allocation cache: one free list per size class plus
// allocation-sampling state.  Reached via TSD, or via mainThreadCache on
// the single-threaded fast path.
class TCMalloc_ThreadCache {
private:
typedef TCMalloc_ThreadCache_FreeList FreeList;
// size_: free bytes held in this cache; tid_: owning thread;
// setspecific_: whether this cache was stored into TSD yet.
size_t size_; pthread_t tid_; bool setspecific_; FreeList list_[kNumClasses];
// Sampling PRNG state and byte countdown to the next sampled allocation.
uint32_t rnd_; size_t bytes_until_sample_;
public:
// Links in the global list of all thread caches (pageheap_lock held).
TCMalloc_ThreadCache* next_;
TCMalloc_ThreadCache* prev_;
void Init(pthread_t tid);
void Cleanup();
int freelist_length(size_t cl) const { return list_[cl].length(); }
// Total free bytes currently cached here.
size_t Size() const { return size_; }
void* Allocate(size_t size);
void Deallocate(void* ptr, size_t size_class);
void FetchFromCentralCache(size_t cl, size_t allocationSize);
void ReleaseToCentralCache(size_t cl, int N);
void Scavenge();
void Print() const;
// True if an allocation of k bytes should be sampled.
bool SampleAllocation(size_t k);
void PickNextSample();
// Module/TSD bookkeeping and cache lookup/creation.
static void InitModule();
static void InitTSD();
static TCMalloc_ThreadCache* GetCache();
static TCMalloc_ThreadCache* GetCacheIfPresent();
static void* CreateCacheIfNecessary();
static void DeleteCache(void* ptr);
static void RecomputeThreadCacheSize();
};
// Central free list for one size class, shared by all threads and guarded
// by its own spinlock (lock_), which callers acquire around
// Insert/Remove/Populate.
class TCMalloc_Central_FreeList {
public:
void Init(size_t cl);
// Return one object (may hand a fully-free span back to the page heap;
// temporarily drops lock_ while doing so).
void Insert(void* object);
// Take one object, or NULL if no span has free objects.
void* Remove();
// Fetch a new span from the page heap and carve it into objects.
void Populate();
int length() const { return counter_; }
SpinLock lock_;
private:
// Spans with no free objects (empty_) vs. some free objects (nonempty_);
// counter_ counts free objects across all spans.
size_t size_class_; Span empty_; Span nonempty_; size_t counter_; };
// Pad each central list to a multiple of 64 bytes -- presumably to keep
// the per-class locks on separate cache lines; confirm on target CPUs.
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
private:
char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
// ---- Module-level state ----
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
// Guards the page heap plus the thread-heap list bookkeeping below.
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
// Storage for the page heap, constructed lazily with placement new.
static char pageheap_memory[sizeof(TCMalloc_PageHeap)] __attribute__((aligned));
static bool phinited = false;
#define pageheap ((TCMalloc_PageHeap*) pageheap_memory)
static bool tsd_inited = false;
static pthread_key_t heap_key;
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
// Doubly-linked list of all live thread caches, and its length.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;
// Total free-byte budget shared by all thread caches, and the per-thread
// slice of it (recomputed as threads come and go).
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
// Prepare an empty central list for size class cl.
void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  counter_ = 0;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
}
// Return object to its span's free list.  Caller holds lock_.  If the
// span becomes completely free it is handed back to the page heap; lock_
// is dropped while pageheap_lock is taken -- NOTE(review): presumably to
// avoid holding both locks at once; verify the intended lock ordering.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Insert(void* object) {
const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
Span* span = pageheap->GetDescriptor(p);
ASSERT(span != NULL);
ASSERT(span->refcount > 0);
// The span had no free objects: move it back to the nonempty list.
if (span->objects == NULL) {
DLL_Remove(span);
DLL_Prepend(&nonempty_, span);
Event(span, 'N', 0);
}
// Disabled (expensive) sanity check of the span's free list.
if (false) {
int got = 0;
for (void* p = span->objects; p != NULL; p = *((void**) p)) {
ASSERT(p != object);
got++;
}
ASSERT(got + span->refcount ==
(span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
}
counter_++;
span->refcount--;
if (span->refcount == 0) {
// No object in the span is in use any more: return the whole span to
// the page heap and remove its objects from our counter.
Event(span, '#', 0);
counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
DLL_Remove(span);
lock_.Unlock();
{
SpinLockHolder h(&pageheap_lock);
pageheap->Delete(span);
}
lock_.Lock();
} else {
// Push the object onto the span's free list.
*(reinterpret_cast<void**>(object)) = span->objects;
span->objects = object;
}
}
// Pop one object from the first nonempty span; NULL when no span has free
// objects (caller should Populate()).  Caller holds lock_.
ALWAYS_INLINE void* TCMalloc_Central_FreeList::Remove() {
if (DLL_IsEmpty(&nonempty_)) return NULL;
Span* span = nonempty_.next;
ASSERT(span->objects != NULL);
span->refcount++;
void* result = span->objects;
span->objects = *(reinterpret_cast<void**>(result));
// Took the span's last free object: park it on the empty list.
if (span->objects == NULL) {
DLL_Remove(span);
DLL_Prepend(&empty_, span);
Event(span, 'E', 0);
}
counter_--;
return result;
}
// Fetch a fresh span from the page heap and thread all of its objects
// onto a free list.  Entered and left with lock_ held; lock_ is dropped
// while pageheap_lock is taken.
ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
lock_.Unlock();
const size_t npages = class_to_pages[size_class_];
Span* span;
{
SpinLockHolder h(&pageheap_lock);
span = pageheap->New(npages);
if (span) pageheap->RegisterSizeClass(span, size_class_);
}
if (span == NULL) {
MESSAGE("allocation failed: %d\n", errno);
lock_.Lock();
return;
}
// Chain every object in the span onto span->objects in address order.
void** tail = &span->objects;
char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
char* limit = ptr + (npages << kPageShift);
const size_t size = ByteSizeForClass(size_class_);
int num = 0;
char* nptr;
while ((nptr = ptr + size) <= limit) {
*tail = ptr;
tail = reinterpret_cast<void**>(ptr);
ptr = nptr;
num++;
}
ASSERT(ptr <= limit);
*tail = NULL;
// No object handed out yet.
span->refcount = 0;
lock_.Lock();
DLL_Prepend(&nonempty_, span);
counter_ += num;
}
// Decrement the sampling countdown by k bytes; when it underflows, pick a
// fresh countdown and report that this allocation should be sampled.
inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
  if (bytes_until_sample_ >= k) {
    bytes_until_sample_ -= k;
    return false;
  }
  PickNextSample();
  return true;
}
// Set up an empty cache owned by thread tid.
void TCMalloc_ThreadCache::Init(pthread_t tid) {
size_ = 0;
next_ = NULL;
prev_ = NULL;
tid_ = tid;
setspecific_ = false;
for (size_t cl = 0; cl < kNumClasses; ++cl) {
list_[cl].Init();
}
// Seed the sampling PRNG from this cache's address and warm it up so
// early outputs are decorrelated from the seed.
rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
for (int i = 0; i < 100; i++) {
PickNextSample();
}
}
void TCMalloc_ThreadCache::Cleanup() {
for (size_t cl = 0; cl < kNumClasses; ++cl) {
FreeList* src = &list_[cl];
TCMalloc_Central_FreeList* dst = ¢ral_cache[cl];
SpinLockHolder h(&dst->lock_);
while (!src->empty()) {
dst->Insert(src->Pop());
}
}
}
// Allocate a small object from this thread's free list for the size
// class, refilling from the central cache when the list is empty.
ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
ASSERT(size <= kMaxSize);
const size_t cl = SizeClass(size);
FreeList* list = &list_[cl];
// NOTE(review): for "tiny" requests the accounted size is the request
// rounded to 8 bytes rather than ByteSizeForClass(cl), while
// Deallocate() always credits ByteSizeForClass(cl) -- so size_ can
// drift for tiny classes.  Verify this asymmetry is intentional.
size_t allocationSize = (size <= kMaxTinySize) ? (size + 7) & ~0x7 : ByteSizeForClass(cl);
if (list->empty()) {
FetchFromCentralCache(cl, allocationSize);
if (list->empty()) return NULL;
}
// size_ tracks free bytes held in this cache, so allocation decreases it.
size_ -= allocationSize;
return list->Pop();
}
// Return an object of size class cl to the thread-local list; trim the
// list to the central cache when it grows long, and scavenge when the
// cache exceeds its free-byte budget.
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
size_ += ByteSizeForClass(cl);
FreeList* list = &list_[cl];
list->Push(ptr);
if (list->length() > kMaxFreeListLength) {
ReleaseToCentralCache(cl, kNumObjectsToMove);
}
if (size_ >= per_thread_cache_size) Scavenge();
}
// Move up to kNumObjectsToMove objects of class cl from the central cache
// into this thread's free list, populating the central list once if it
// starts out empty.  byteSize is the per-object size credited to size_.
ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t byteSize) {
  // Fix: "&central_cache" had been corrupted to "¢ral_cache" by
  // HTML-entity mangling ("&cent"), which does not compile.
  TCMalloc_Central_FreeList* src = &central_cache[cl];
  FreeList* dst = &list_[cl];
  SpinLockHolder h(&src->lock_);
  for (int i = 0; i < kNumObjectsToMove; i++) {
    void* object = src->Remove();
    if (object == NULL) {
      // Central list exhausted: refill it once from the page heap.
      if (i == 0) {
        src->Populate();
        object = src->Remove();
      }
      if (object == NULL) {
        break;
      }
    }
    dst->Push(object);
    size_ += byteSize;
  }
}
inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
FreeList* src = &list_[cl];
TCMalloc_Central_FreeList* dst = ¢ral_cache[cl];
SpinLockHolder h(&dst->lock_);
if (N > src->length()) N = src->length();
size_ -= N*ByteSizeForClass(cl);
while (N-- > 0) {
void* ptr = src->Pop();
dst->Insert(ptr);
}
}
// Shrink each free list based on its low-water mark since the previous
// scavenge: objects that were never touched are partially (about half)
// returned to the central cache.
inline void TCMalloc_ThreadCache::Scavenge() {
#ifndef WTF_CHANGES
int64 start = CycleClock::Now();
#endif
for (size_t cl = 0; cl < kNumClasses; cl++) {
FreeList* list = &list_[cl];
const int lowmark = list->lowwatermark();
if (lowmark > 0) {
const int drop = (lowmark > 1) ? lowmark/2 : 1;
ReleaseToCentralCache(cl, drop);
}
list->clear_lowwatermark();
}
#ifndef WTF_CHANGES
int64 finish = CycleClock::Now();
CycleTimer ct;
MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
#endif
}
// Single-threaded fast path: until a second thread is registered, the
// main thread's cache is reachable without pthread_getspecific.
bool isMultiThreaded;
TCMalloc_ThreadCache *mainThreadCache;
pthread_t mainThreadID;
static SpinLock multiThreadedLock = SPINLOCK_INITIALIZER;
// Called when a new thread starts; the first non-main thread permanently
// disables the main-thread fast path.
// NOTE(review): mainThreadCache is read without this lock in GetCache()
// and GetCacheIfPresent() -- presumably the transition is benign on the
// supported platforms; verify.
void fastMallocRegisterThread(pthread_t thread)
{
if (thread != mainThreadID) {
SpinLockHolder lock(&multiThreadedLock);
isMultiThreaded = true;
mainThreadCache = 0;
}
}
// Get the calling thread's cache, initializing the module and/or creating
// the cache on first use.
ALWAYS_INLINE TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
void* ptr = NULL;
if (!tsd_inited) {
InitModule();
} else {
// Single-threaded fast path avoids pthread_getspecific.
if (mainThreadCache)
ptr = mainThreadCache;
else
ptr = pthread_getspecific(heap_key);
}
if (ptr == NULL) ptr = CreateCacheIfNecessary();
return reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
}
// Like GetCache() but never creates a cache: returns NULL when the
// calling thread does not have one yet.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
  // Single-threaded fast path.
  if (mainThreadCache)
    return mainThreadCache;
  // Before TSD exists no per-thread cache can exist either.
  return tsd_inited
      ? reinterpret_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key))
      : NULL;
}
// Advance the per-thread PRNG (a 32-bit linear-feedback shift register
// with feedback polynomial kPoly) and choose the byte countdown until the
// next sampled allocation.
void TCMalloc_ThreadCache::PickNextSample() {
static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
uint32_t r = rnd_;
// Shift left; XOR in the polynomial when the top bit was set (the
// arithmetic shift of the sign bit produces an all-ones/all-zeros mask).
rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
bytes_until_sample_ = rnd_ % kSampleParameter;
}
// One-time global initialization: size classes, metadata allocators,
// central caches, and the page heap.  Safe to call repeatedly; guarded by
// pageheap_lock and the phinited flag.
void TCMalloc_ThreadCache::InitModule() {
SpinLockHolder h(&pageheap_lock);
if (!phinited) {
InitSizeClasses();
threadheap_allocator.Init();
span_allocator.Init();
// Two spans are allocated up front and never freed.
// NOTE(review): presumably this primes the span allocator's first
// chunk -- confirm the original intent before changing.
span_allocator.New(); span_allocator.New(); stacktrace_allocator.Init();
DLL_Init(&sampled_objects);
for (size_t i = 0; i < kNumClasses; ++i) {
central_cache[i].Init(i);
}
// Construct the page heap in its static buffer.
new ((void*)pageheap_memory) TCMalloc_PageHeap;
phinited = 1;
}
}
void TCMalloc_ThreadCache::InitTSD() {
ASSERT(!tsd_inited);
pthread_key_create(&heap_key, DeleteCache);
tsd_inited = true;
pthread_t zero;
memset(&zero, 0, sizeof(zero));
SpinLockHolder h(&pageheap_lock);
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
if (h->tid_ == zero) {
h->tid_ = pthread_self();
}
}
}
void* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
TCMalloc_ThreadCache* heap = NULL;
{
SpinLockHolder h(&pageheap_lock);
pthread_t me;
if (!tsd_inited) {
memset(&me, 0, sizeof(me));
} else {
me = pthread_self();
}
for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
if (h->tid_ == me) {
heap = h;
break;
}
}
if (heap == NULL) {
heap = threadheap_allocator.New();
heap->Init(me);
heap->next_ = thread_heaps;
heap->prev_ = NULL;
if (thread_heaps != NULL) thread_heaps->prev_ = heap;
thread_heaps = heap;
thread_heap_count++;
RecomputeThreadCacheSize();
if (!isMultiThreaded) {
mainThreadCache = heap;
mainThreadID = pthread_self();
}
}
}
if (!heap->setspecific_ && tsd_inited) {
heap->setspecific_ = true;
pthread_setspecific(heap_key, heap);
}
return heap;
}
// TSD destructor: drain and destroy a thread's cache when the thread
// exits.
void TCMalloc_ThreadCache::DeleteCache(void* ptr) {
TCMalloc_ThreadCache* heap;
heap = reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
// Give all cached objects back to the central caches.
heap->Cleanup();
// Unlink from the global list and recycle the descriptor.
SpinLockHolder h(&pageheap_lock);
if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
if (thread_heaps == heap) thread_heaps = heap->next_;
thread_heap_count--;
RecomputeThreadCacheSize();
threadheap_allocator.Delete(heap);
}
// Divide the overall thread-cache budget evenly among live thread heaps,
// clamped to [kMinThreadCacheSize, kMaxThreadCacheSize].
void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
  const int heaps = (thread_heap_count > 0) ? thread_heap_count : 1;
  size_t share = overall_thread_cache_size / heaps;
  if (share < kMinThreadCacheSize)
    share = kMinThreadCacheSize;
  else if (share > kMaxThreadCacheSize)
    share = kMaxThreadCacheSize;
  per_thread_cache_size = share;
}
// Debug dump of this cache's per-class free-list lengths and low-water
// marks.
void TCMalloc_ThreadCache::Print() const {
for (size_t cl = 0; cl < kNumClasses; ++cl) {
MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
ByteSizeForClass(cl),
list_[cl].length(),
list_[cl].lowwatermark());
}
}
// Aggregate allocator statistics, filled in by ExtractStats().
struct TCMallocStats {
uint64_t system_bytes; uint64_t thread_bytes; uint64_t central_bytes; uint64_t pageheap_bytes; uint64_t metadata_bytes; };
#ifndef WTF_CHANGES
// Gather a snapshot of allocator statistics.  If class_count is non-NULL
// it receives per-class object counts (central + thread caches combined).
static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
  r->central_bytes = 0;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    // Fix: "&central_cache" had been corrupted to "¢ral_cache" by
    // HTML-entity mangling ("&cent"), which does not compile.
    SpinLockHolder h(&central_cache[cl].lock_);
    const int length = central_cache[cl].length();
    r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
    if (class_count) class_count[cl] = length;
  }
  // Add stats from the per-thread heaps.
  r->thread_bytes = 0;
  {
    SpinLockHolder h(&pageheap_lock);
    // Fix: the loop variable used to be named "h", shadowing the
    // SpinLockHolder "h" above.
    for (TCMalloc_ThreadCache* heap = thread_heaps; heap != NULL; heap = heap->next_) {
      r->thread_bytes += heap->Size();
      if (class_count) {
        for (size_t cl = 0; cl < kNumClasses; ++cl) {
          class_count[cl] += heap->freelist_length(cl);
        }
      }
    }
  }
  {
    SpinLockHolder h(&pageheap_lock);
    r->system_bytes = pageheap->SystemBytes();
    r->metadata_bytes = metadata_system_bytes;
    r->pageheap_bytes = pageheap->FreeBytes();
  }
}
#endif
#ifndef WTF_CHANGES
// Print allocator statistics to out; level >= 2 adds per-class counts and
// the page-heap dump.
static void DumpStats(TCMalloc_Printer* out, int level) {
TCMallocStats stats;
uint64_t class_count[kNumClasses];
ExtractStats(&stats, (level >= 2 ? class_count : NULL));
if (level >= 2) {
out->printf("------------------------------------------------\n");
uint64_t cumulative = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
if (class_count[cl] > 0) {
uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
cumulative += class_bytes;
out->printf("class %3d [ %8" PRIuS " bytes ] : "
"%8" LLU " objs; %5.1f MB; %5.1f cum MB\n",
cl, ByteSizeForClass(cl),
class_count[cl],
class_bytes / 1048576.0,
cumulative / 1048576.0);
}
}
SpinLockHolder h(&pageheap_lock);
pageheap->Dump(out);
}
// Bytes the application actually holds = system bytes minus all free
// pools.
const uint64_t bytes_in_use = stats.system_bytes
- stats.pageheap_bytes
- stats.central_bytes
- stats.thread_bytes;
out->printf("------------------------------------------------\n"
"MALLOC: %12" LLU " Heap size\n"
"MALLOC: %12" LLU " Bytes in use by application\n"
"MALLOC: %12" LLU " Bytes free in page heap\n"
"MALLOC: %12" LLU " Bytes free in central cache\n"
"MALLOC: %12" LLU " Bytes free in thread caches\n"
"MALLOC: %12" LLU " Spans in use\n"
"MALLOC: %12" LLU " Thread heaps in use\n"
"MALLOC: %12" LLU " Metadata allocated\n"
"------------------------------------------------\n",
stats.system_bytes,
bytes_in_use,
stats.pageheap_bytes,
stats.central_bytes,
stats.thread_bytes,
uint64_t(span_allocator.inuse()),
uint64_t(threadheap_allocator.inuse()),
stats.metadata_bytes);
}
// Format the statistics into a temporary buffer and write it to stderr.
static void PrintStats(int level) {
const int kBufferSize = 16 << 10;
char* buffer = new char[kBufferSize];
TCMalloc_Printer printer(buffer, kBufferSize);
DumpStats(&printer, level);
write(STDERR_FILENO, buffer, strlen(buffer));
delete[] buffer;
}
// Serialize the sampled-allocation stack traces into a flat array of
// slots: for each sample {count=1, size, depth, frames...}, terminated by
// a 0 slot.  The caller owns the returned array.
static void** DumpStackTraces() {
// First pass: estimate how many slots we need (with slack, since the
// sample set can change between the two locked sections).
int needed_slots = 0;
{
SpinLockHolder h(&pageheap_lock);
for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
needed_slots += 3 + stack->depth;
}
needed_slots += 100; needed_slots += needed_slots/8; }
void** result = new void*[needed_slots];
// NOTE(review): plain new[] throws std::bad_alloc rather than returning
// NULL, so this check is likely dead -- verify the build's exception
// settings before relying on it.
if (result == NULL) {
MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
needed_slots);
return NULL;
}
// Second pass: fill the slots, stopping early if the estimate was low.
SpinLockHolder h(&pageheap_lock);
int used_slots = 0;
for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
ASSERT(used_slots < needed_slots); StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
if (used_slots + 3 + stack->depth >= needed_slots) {
break;
}
result[used_slots+0] = reinterpret_cast<void*>(1);
result[used_slots+1] = reinterpret_cast<void*>(stack->size);
result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
for (int d = 0; d < stack->depth; d++) {
result[used_slots+3+d] = stack->stack[d];
}
used_slots += 3 + stack->depth;
}
result[used_slots] = reinterpret_cast<void*>(0);
return result;
}
#endif
#ifndef WTF_CHANGES
// MallocExtension hooks exposing statistics and tunables by name
// (upstream tcmalloc builds only).
class TCMallocImplementation : public MallocExtension {
public:
virtual void GetStats(char* buffer, int buffer_length) {
ASSERT(buffer_length > 0);
TCMalloc_Printer printer(buffer, buffer_length);
// Emit per-class detail only when the buffer is large enough.
if (buffer_length < 10000) {
DumpStats(&printer, 1);
} else {
DumpStats(&printer, 2);
}
}
virtual void** ReadStackTraces() {
return DumpStackTraces();
}
// Read a named numeric property; returns false for unknown names.
virtual bool GetNumericProperty(const char* name, size_t* value) {
ASSERT(name != NULL);
if (strcmp(name, "generic.current_allocated_bytes") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
// Application bytes = system bytes minus every free pool.
*value = stats.system_bytes
- stats.thread_bytes
- stats.central_bytes
- stats.pageheap_bytes;
return true;
}
if (strcmp(name, "generic.heap_size") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
*value = stats.system_bytes;
return true;
}
if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
SpinLockHolder l(&pageheap_lock);
*value = pageheap->FreeBytes();
return true;
}
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
SpinLockHolder l(&pageheap_lock);
*value = overall_thread_cache_size;
return true;
}
if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
TCMallocStats stats;
ExtractStats(&stats, NULL);
*value = stats.thread_bytes;
return true;
}
return false;
}
// Set a named tunable; only the thread-cache budget is writable.
virtual bool SetNumericProperty(const char* name, size_t value) {
ASSERT(name != NULL);
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
// Clamp to [kMinThreadCacheSize, 1GB] before applying.
if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
if (value > (1<<30)) value = (1<<30);
SpinLockHolder l(&pageheap_lock);
overall_thread_cache_size = static_cast<size_t>(value);
TCMalloc_ThreadCache::RecomputeThreadCacheSize();
return true;
}
return false;
}
};
#endif
#ifndef WTF_CHANGES
// Set during process teardown in upstream tcmalloc builds (see do_free).
static bool tcmalloc_is_destroyed = false;
#endif
#ifndef WTF_CHANGES
// Allocate a sampled object directly from the page heap and record its
// stack trace on the sampled_objects list.
static Span* DoSampledAllocation(size_t size) {
SpinLockHolder h(&pageheap_lock);
Span* span = pageheap->New(pages(size == 0 ? 1 : size));
if (span == NULL) {
return NULL;
}
StackTrace* stack = stacktrace_allocator.New();
if (stack == NULL) {
// Out of metadata memory: return the span unsampled.
return span;
}
// Skip 2 frames (this function and its caller) when capturing.
stack->depth = GetStackTrace(stack->stack, kMaxStackDepth, 2);
stack->size = size;
span->sample = 1;
span->objects = stack;
DLL_Prepend(&sampled_objects, span);
return span;
}
#endif
// Central allocation path shared by malloc/calloc/realloc/operator new.
// Small requests (<= kMaxSize) come from the per-thread cache; larger
// ones are carved directly out of the page heap as whole-page spans.
static ALWAYS_INLINE void* do_malloc(size_t size) {
#ifndef WTF_CHANGES
  if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
    MESSAGE("In tcmalloc do_malloc(%" PRIuS")\n", size);
#endif
#ifndef WTF_CHANGES
  // Standalone build only: occasionally divert an allocation through the
  // sampling path so heap profiles can be collected. Note the dangling
  // `else` below pairs with the `if (size > kMaxSize)` that follows.
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if (heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span == NULL) return NULL;
    return reinterpret_cast<void*>(span->start << kPageShift);
  } else
#endif
  if (size > kMaxSize) {
    // Large request: bypass the thread cache. Make sure the allocator is
    // initialized before touching the page heap.
    if (!tsd_inited && !phinited)
      TCMalloc_ThreadCache::InitModule();
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span == NULL) return NULL;
    return reinterpret_cast<void*>(span->start << kPageShift);
  } else {
#ifdef WTF_CHANGES
    // WebKit build: `heap` was not declared above, so fetch it here.
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
#endif
    return heap->Allocate(size);
  }
}
// Central deallocation path: return memory to the per-thread cache for
// small (size-classed) objects, or to the page heap for large spans.
// Fixes: (1) `#else WTF_CHANGES` was ill-formed — a #else directive takes
// no tokens; (2) the central-cache lock argument had been mangled to
// "¢ral_cache" (an HTML-entity corruption of "&central_cache").
static ALWAYS_INLINE void do_free(void* ptr) {
#ifndef WTF_CHANGES
  if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
    MESSAGE("In tcmalloc do_free(%p)\n", ptr);
#endif
#if WTF_CHANGES
  if (ptr == NULL) return;
#else
  // Standalone tcmalloc must also ignore frees that arrive after static
  // destruction has torn the allocator down.
  if (ptr == NULL || tcmalloc_is_destroyed) return;
#endif
  ASSERT(pageheap != NULL);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
#ifndef WTF_CHANGES
  if (span == NULL) {
    // Tolerate frees of pointers we never handed out (glibc-2.3.5 quirk).
    MESSAGE("tcmalloc: ignoring potential glibc-2.3.5 induced free "
            "of an unknown object %p\n", ptr);
    return;
  }
#endif
  ASSERT(span != NULL);
  ASSERT(!span->free);
  const size_t cl = span->sizeclass;
  if (cl != 0) {
    // Small object: push onto this thread's cache if one exists,
    // otherwise insert directly into the central free list.
    ASSERT(!span->sample);
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      SpinLockHolder h(&central_cache[cl].lock_);
      central_cache[cl].Insert(ptr);
    }
  } else {
    // Large object: hand the whole span back to the page heap.
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span->start == p);
    if (span->sample) {
      // Release the recorded stack trace before freeing the span itself.
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
    pageheap->Delete(span);
  }
}
#ifndef WTF_CHANGES
// Allocate `size` bytes aligned to `align` (asserted to be a power of
// two). Small requests search for a size class whose size is already a
// multiple of the alignment; otherwise whole-page spans are used,
// over-allocating and trimming when align exceeds the page size.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
  // Treat a zero-byte request as one byte so a unique pointer is returned.
  if (size == 0) size = 1;
  if (size <= kMaxSize && align < kPageSize) {
    // Find the smallest size class whose object size is align-multiple.
    size_t cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return heap->Allocate(class_to_size[cl]);
    }
  }
  SpinLockHolder h(&pageheap_lock);
  if (align <= kPageSize) {
    // Any whole-page span is automatically page-aligned.
    Span* span = pageheap->New(pages(size));
    if (span == NULL) return NULL;
    return reinterpret_cast<void*>(span->start << kPageShift);
  }
  // align > page size: over-allocate by `align` extra bytes of pages so
  // an aligned boundary must fall inside the span.
  const int alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;
  // Count leading pages to skip to reach the aligned boundary.
  int skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    // Split off and return the misaligned prefix to the page heap.
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);
    span = rest;
  }
  const size_t needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    // Give back the tail pages we over-allocated.
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return reinterpret_cast<void*>(span->start << kPageShift);
}
#endif
// Static-initialization hook: constructing the single global instance of
// this class forces the allocator and its thread-specific data to be set
// up before other static initializers can allocate.
class TCMallocGuard {
 public:
  TCMallocGuard() {
#ifndef WTF_CHANGES
    // Standalone build: TCMALLOC_DEBUG=<n> selects the debug verbosity.
    const char* envval = getenv("TCMALLOC_DEBUG");
    if (envval) {
      TCMallocDebug::level = atoi(envval);
      MESSAGE("Set tcmalloc debugging level to %d\n", TCMallocDebug::level);
    }
#endif
    // Force one malloc/free round trip before and after TSD setup.
    do_free(do_malloc(1));
    TCMalloc_ThreadCache::InitTSD();
    do_free(do_malloc(1));
#ifndef WTF_CHANGES
    MallocExtension::Register(new TCMallocImplementation);
#endif
  }

#ifndef WTF_CHANGES
  // Honor MALLOCSTATS=<level> by dumping statistics at shutdown.
  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
  }
#endif
};
static TCMallocGuard module_enter_exit_hook;
#ifndef WTF_CHANGES
extern "C"
#endif
// Public malloc entry point: thin wrapper over do_malloc (plus the
// profiling hook in the standalone build).
void* malloc(size_t size) {
  void* allocation = do_malloc(size);
#ifndef WTF_CHANGES
  MallocHook::InvokeNewHook(allocation, size);
#endif
  return allocation;
}
#ifndef WTF_CHANGES
extern "C"
#endif
// Public free entry point: notify the hook (standalone build only), then
// recycle via do_free.
void free(void* ptr) {
#ifndef WTF_CHANGES
  MallocHook::InvokeDeleteHook(ptr);
#endif
  do_free(ptr);
}
#ifndef WTF_CHANGES
extern "C"
#endif
// Public calloc entry point: allocate n * elem_size zeroed bytes.
// Fix: the original computed n * elem_size without an overflow check, so
// a wrapping product returned a buffer far smaller than requested — the
// classic calloc overflow vulnerability.
void* calloc(size_t n, size_t elem_size) {
  const size_t totalBytes = n * elem_size;
  // Reject requests whose byte count overflows size_t.
  if (elem_size != 0 && totalBytes / elem_size != n)
    return NULL;
  void* result = do_malloc(totalBytes);
  if (result != NULL) {
    memset(result, 0, totalBytes);
  }
#ifndef WTF_CHANGES
  MallocHook::InvokeNewHook(result, totalBytes);
#endif
  return result;
}
#ifndef WTF_CHANGES
extern "C"
#endif
// Legacy cfree entry point — identical behavior to free().
void cfree(void* ptr) {
#ifndef WTF_CHANGES
  MallocHook::InvokeDeleteHook(ptr);
#endif
  do_free(ptr);
}
#ifndef WTF_CHANGES
extern "C"
#endif
// Public realloc entry point. Keeps the existing block when it is already
// an acceptable size; otherwise allocates, copies, and frees.
void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    // realloc(NULL, n) behaves like malloc(n).
    void* result = do_malloc(new_size);
#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(result, new_size);
#endif
    return result;
  }
  if (new_size == 0) {
    // realloc(p, 0) behaves like free(p) and returns NULL.
#ifndef WTF_CHANGES
    MallocHook::InvokeDeleteHook(old_ptr);
#endif
    free(old_ptr);
    return NULL;
  }
  // Recover the allocation's actual size from its span metadata.
  // NOTE(review): the span is dereferenced unchecked — a pointer that did
  // not come from this allocator would crash here; confirm acceptable.
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  size_t old_size;
  if (span->sizeclass != 0) {
    // Small object: the size class records its allocated size.
    old_size = ByteSizeForClass(span->sizeclass);
  } else {
    // Large object: size is the span's page count.
    old_size = span->length << kPageShift;
  }
  // Reallocate only when the block must grow, or when the new size maps
  // to a strictly smaller allocation (worthwhile shrink); otherwise keep
  // the existing block untouched.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
#ifndef WTF_CHANGES
    MallocHook::InvokeNewHook(new_ptr, new_size);
#endif
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
#ifndef WTF_CHANGES
    MallocHook::InvokeDeleteHook(old_ptr);
#endif
    free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
#ifndef COMPILER_INTEL
#define OPNEW_THROW
#define OPDELETE_THROW
#else
#define OPNEW_THROW throw(std::bad_alloc)
#define OPDELETE_THROW throw()
#endif
#ifndef WTF_CHANGES
// Global operator new backed by tcmalloc; aborts instead of throwing when
// the allocation cannot be satisfied.
void* operator new(size_t size) OPNEW_THROW {
  void* allocation = do_malloc(size);
  if (allocation == NULL) {
    MESSAGE("Unable to allocate %" PRIuS " bytes: new failed\n", size);
    abort();
  }
  MallocHook::InvokeNewHook(allocation, size);
  return allocation;
}
// Global operator delete: hook first, then recycle through do_free.
void operator delete(void* ptr) OPDELETE_THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
// Array form of operator new; same abort-on-failure policy as the scalar
// form above.
void* operator new[](size_t size) OPNEW_THROW {
  void* allocation = do_malloc(size);
  if (allocation == NULL) {
    MESSAGE("Unable to allocate %" PRIuS " bytes: new failed\n", size);
    abort();
  }
  MallocHook::InvokeNewHook(allocation, size);
  return allocation;
}
// Array form of operator delete.
void operator delete[](void* ptr) OPDELETE_THROW {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
// Public memalign entry point: thin wrapper over do_memalign.
extern "C" void* memalign(size_t align, size_t size) {
  void* allocation = do_memalign(align, size);
  MallocHook::InvokeNewHook(allocation, size);
  return allocation;
}
// POSIX-style aligned allocation: align must be a nonzero power of two
// and a multiple of sizeof(void*). Returns 0, EINVAL, or ENOMEM.
extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) {
  const bool alignmentIsValid = (align != 0)
      && ((align & (align - 1)) == 0)
      && ((align % sizeof(void*)) == 0);
  if (!alignmentIsValid)
    return EINVAL;
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  if (result == NULL)
    return ENOMEM;
  *result_ptr = result;
  return 0;
}
// Cached getpagesize() result, looked up lazily by valloc/pvalloc.
static size_t pagesize = 0;

// Page-aligned allocation of `size` bytes.
extern "C" void* valloc(size_t size) {
  if (pagesize == 0)
    pagesize = getpagesize();
  void* allocation = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(allocation, size);
  return allocation;
}
// Like valloc, but the requested size is first rounded up to a whole
// number of pages.
extern "C" void* pvalloc(size_t size) {
  if (pagesize == 0)
    pagesize = getpagesize();
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* allocation = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(allocation, size);
  return allocation;
}
// Emit a level-1 statistics report (see PrintStats).
extern "C" void malloc_stats(void) {
  PrintStats(1);
}
// No malloc options are tunable here; accept and ignore every request.
extern "C" int mallopt(int cmd, int value) {
  return 1;
}
// Map tcmalloc's counters onto the closest mallinfo fields; all fields
// not listed below stay zero.
extern "C" struct mallinfo mallinfo(void) {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);
  struct mallinfo info;
  memset(&info, 0, sizeof(info));
  info.arena = static_cast<int>(stats.system_bytes);
  // Bytes cached for reuse (thread caches + central lists).
  info.fsmblks = static_cast<int>(stats.thread_bytes + stats.central_bytes);
  // Free bytes held by the page heap.
  info.fordblks = static_cast<int>(stats.pageheap_bytes);
  // Bytes currently in use by the application.
  info.uordblks = static_cast<int>(stats.system_bytes - stats.thread_bytes
                                   - stats.central_bytes
                                   - stats.pageheap_bytes);
  return info;
}
extern "C" {
#if COMPILER(GCC) && HAVE(__ATTRIBUTE__)
// On GCC with attribute support, publish the glibc-internal entry points
// (__libc_malloc and friends) as weak aliases of our replacements so
// callers that bypass the public names still reach this allocator.
#define ALIAS(x) __attribute__ ((weak, alias (x)))
void* __libc_malloc(size_t size) ALIAS("malloc");
void __libc_free(void* ptr) ALIAS("free");
void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
void __libc_cfree(void* ptr) ALIAS("cfree");
void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
void* __libc_valloc(size_t size) ALIAS("valloc");
void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
#undef ALIAS
#else
// Without attribute aliases, fall back to ordinary forwarding wrappers.
void* __libc_malloc(size_t size) { return malloc(size); }
void __libc_free(void* ptr) { free(ptr); }
void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
void __libc_cfree(void* ptr) { cfree(ptr); }
void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
void* __libc_valloc(size_t size) { return valloc(size); }
void* __libc_pvalloc(size_t size) { return pvalloc(size); }
int __posix_memalign(void** r, size_t a, size_t s) {
  return posix_memalign(r, a, s);
}
#endif
}
#endif
#if WTF_CHANGES
} #endif
#endif // USE_SYSTEM_MALLOC