#pragma once
#ifndef __AUTO_DEFS__
#define __AUTO_DEFS__
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_time.h>
#include <mach/task.h>
#include <mach/thread_act.h>
#include <mach/vm_map.h>
#include <pthread.h>
#include <unistd.h>
#include <malloc/malloc.h>
#include <map>
#include <vector>
#include <ext/hash_map>
#include <ext/hash_set>
#include <libkern/OSAtomic.h>
#include <System/pthread_machdep.h>
#include "AutoEnvironment.h"
#include "auto_impl_utilities.h"
#ifdef DEBUG
// Debug-only global: an address to watch; watchpoint() traps when the
// collector touches it.  Intended to be set from a debugger.
extern "C" void* WatchPoint;
#endif
// The auxiliary (non-garbage-collected) malloc zone used for the collector's
// own bookkeeping allocations (aux_malloc/aux_calloc/aux_free draw from it).
extern "C" malloc_zone_t *aux_zone;
// Returns the prefix string used in all of auto's diagnostic messages.
extern "C" const char *auto_prelude(void);
// CrashReporter reads this string when the process aborts; code elsewhere
// can stash context here before trapping.
extern "C" char *__crashreporter_info__;
namespace Auto {
inline const char *prelude(void) { return auto_prelude(); }
// ASSERTION(expr): debug-only invariant check.
// DEBUG builds print the failing expression with file/line via malloc_printf
// and then trap.  Release builds still evaluate the expression -- so any
// side effects are preserved -- but discard the result.
#if defined(DEBUG)
#define ASSERTION(expression) if(!(expression)) { \
malloc_printf("*** %s: Assertion %s %s.%d\n", prelude(), #expression, __FILE__, __LINE__); \
__builtin_trap(); \
}
#else
#define ASSERTION(expression) (void)(expression)
#endif
// Callback used when inspecting a (possibly remote) task's heap: maps `size`
// bytes at `remote_address` in `remote_task` into the calling process and
// returns the local mapping through `local_memory`.
typedef kern_return_t (*auto_memory_reader_t)(task_t remote_task, vm_address_t remote_address, vm_size_t size, void **local_memory);
// Callback used to report ranges of VM addresses (malloc-zone style
// introspection); `type` tags what kind of ranges are being recorded.
typedef void (*auto_vm_range_recorder_t)(task_t, void *, unsigned type, vm_range_t *, unsigned);
// Machine word types: the natural register-sized unsigned/signed integers.
typedef unsigned long usword_t;
typedef signed long sword_t;

// Fundamental size and bit-manipulation constants, derived from the word size
// at compile time.
enum {
    page_size = 0x1000u, page_size_log2 = 12,
    bits_per_byte = 8, bits_per_byte_log2 = 3,
    is_64BitWord = sizeof(usword_t) == 8u, is_32BitWord = sizeof(usword_t) == 4u,
    bytes_per_word = is_64BitWord ? 8u : 4u, bytes_per_word_log2 = is_64BitWord ? 3u : 2u,
    bits_per_word = is_64BitWord ? 64u : 32u, bits_per_word_log2 = is_64BitWord ? 6u : 5u,
    bytes_per_quad = 16, bytes_per_quad_log2 = 4,
    bits_per_quad = 128, bits_per_quad_log2 = 7,
    bits_mask = (usword_t)(bits_per_word - 1),
    all_zeros = (usword_t)0, all_ones = ~all_zeros,
    not_found = all_ones,
    pointer_alignment = is_64BitWord ? 3u : 2u, block_alignment = is_64BitWord ? 5u : 4u,
};

// True when every bit of x is set.
inline const bool is_all_ones(usword_t x) { return x == all_ones; }
// True when every bit of x is clear.
inline const bool is_all_zeros(usword_t x) { return x == all_zeros; }
// True when at least one bit of x is set.
inline const bool is_some_ones(usword_t x) { return !is_all_zeros(x); }
// True when at least one bit of x is clear.
inline const bool is_some_zeros(usword_t x) { return !is_all_ones(x); }
// Byte-granular pointer arithmetic: address advanced by offset bytes.
inline void *displace(void *address, const intptr_t offset) {
    char *base = (char *)address;
    return (void *)(base + offset);
}
// Smaller of two words.
static inline const usword_t min(usword_t a, usword_t b) { return b < a ? b : a; }
// Larger of two words.
static inline const usword_t max(usword_t a, usword_t b) { return b > a ? b : a; }
// Returns a word with the low n bits set, 1 <= n <= bits_per_word.
static inline const usword_t mask(usword_t n) {
ASSERTION(0 < n && n <= bits_per_word);
#if defined(__ppc__) || defined(__ppc64__)
// PowerPC shift instructions use enough count bits that shifting a word by
// its full width produces 0, so (1 << bits_per_word) - 1 yields all ones on
// that hardware.  NOTE(review): shifting by the type width is formally
// undefined in C/C++ -- this relies on PPC codegen; the #else form is the
// portable one.
return (1L << n) - 1;
#else
// Portable form: never shifts by more than bits_per_word - 1.
return (2L << (n - 1)) - 1;
#endif
}
static inline const bool is_power_of_2(usword_t x) { return ((x - 1) & x) == 0; }
// Number of leading zero bits in value; returns bits_per_word when value == 0.
// The PPC instructions define the zero case in hardware; the builtin paths
// need the explicit zero check because __builtin_clz* is undefined for 0.
static inline const usword_t count_leading_zeros(register usword_t value) {
#if defined(__ppc__)
// cntlzw: 32-bit count-leading-zeros (0 -> 32 in hardware).
register usword_t count;
__asm__ volatile ("cntlzw %[count], %[value]" : [count] "=r" (count) : [value] "r" (value));
return count;
#elif defined(__ppc64__)
// cntlzd: 64-bit count-leading-zeros (0 -> 64 in hardware).
register usword_t count;
__asm__ volatile ("cntlzd %[count], %[value]" : [count] "=r" (count) : [value] "r" (value));
return count;
#elif __GNUC__ < 4
// Pre-gcc4 fallback: binary search for the highest set bit.
if (!value) return bits_per_word;
unsigned count = 0;
for (unsigned sh = bits_per_word >> 1; sh; sh >>= 1) {
unsigned tmp = value >> sh;
if (tmp) {
// Upper half nonzero: keep narrowing within it.
value = tmp;
} else {
// Upper half empty: those sh positions are all leading zeros.
count |= sh;
}
}
return count;
#elif __LP64__
return value ? __builtin_clzll(value) : bits_per_word;
#else
return value ? __builtin_clz(value) : bits_per_word;
#endif
}
// Rotate value left by n bits; n must satisfy 0 < n < bits_per_word.
inline usword_t rotate_bits_left(usword_t value, usword_t n) {
    ASSERTION(0 < n && n < bits_per_word);
    usword_t upper = value << n;
    usword_t wrapped = value >> (bits_per_word - n);
    return upper | wrapped;
}
// Rotate value right by n bits; n must satisfy 0 < n < bits_per_word.
inline usword_t rotate_bits_right(usword_t value, usword_t n) {
    ASSERTION(0 < n && n < bits_per_word);
    usword_t lower = value >> n;
    usword_t wrapped = value << (bits_per_word - n);
    return wrapped | lower;
}
// Whether AltiVec/vector hardware is available.  Both branches currently
// report false; the PowerPC branch presumably once consulted
// _cpu_capabilities -- TODO(review): confirm before enabling vector paths.
// Fix: the original glued "#else" onto the end of the PPC definition's line;
// preprocessor directives must begin on their own line, so that text was a
// syntax error rather than a conditional.
#if defined(__ppc__) || defined(__ppc64__)
inline bool has_altivec() { return false; }
#else
inline bool has_altivec() { return false; }
#endif
static inline const usword_t ilog2(register usword_t value) { return (bits_per_word - 1) - count_leading_zeros(value); }
static inline const usword_t partition(usword_t x, usword_t y) { return (x + y - 1) / y; }
static inline const usword_t partition2(usword_t x, usword_t y) { return (x + mask(y)) >> y; }
static inline const usword_t align(usword_t x, usword_t y) { return partition(x, y) * y; }
static inline const usword_t align2(usword_t x, usword_t y) { usword_t m = mask(y); return (x + m) & ~m; }
static inline void *align_down(void *address, usword_t n = page_size_log2) {
usword_t m = mask(n);
return (void *)((uintptr_t)address & ~m);
}
static inline void *align_up(void *address, usword_t n = page_size_log2) {
usword_t m = mask(n);
return (void *)(((uintptr_t)address + m) & ~m);
}
// Bit width of value: one past the index of the highest set bit
// (bits_per_word - clz); 0 when value == 0.
static inline const usword_t count_trailing_bits(register usword_t value) {
    return bits_per_word - count_leading_zeros(value);
}
// Mask covering the zero bits below the lowest set bit of x (all ones when
// x == 0).
static inline const usword_t trailing_zeros(usword_t x) {
    return ~x & (x - 1);
}
// Mask covering the run of one bits at the bottom of x.
static inline const usword_t trailing_ones(usword_t x) {
    return ~(x + 1) & x;
}
// Number of zero bits below the lowest set bit of x.
static inline const usword_t count_trailing_zeros(usword_t x) {
    return count_trailing_bits(trailing_zeros(x));
}
// Length of the run of one bits at the bottom of x.
static inline const usword_t count_trailing_ones(usword_t x) {
    return count_trailing_bits(trailing_ones(x));
}
// True when address lies on a 2^n boundary.
inline bool is_bit_aligned(void *address, usword_t n) {
    return ((uintptr_t)address & mask(n)) == 0;
}
// True when address is aligned for a pointer-sized access.
inline bool is_pointer_aligned(void *address) {
    return is_bit_aligned(address, pointer_alignment);
}
// True when address is aligned to the allocation block quantum.
inline bool is_block_aligned(void *address) {
    return is_bit_aligned(address, block_alignment);
}
// C-string equality.
inline bool is_equal(const char *x, const char *y) {
    return 0 == strcmp(x, y);
}
// Atomic compare-and-swap: if *address == old_value, store new_value.
// Returns the value previously in *address; callers compare the result
// against old_value to detect success.
// NOTE(review): the PPC paths split one atomic sequence across several
// separate asm statements -- this depends on the compiler not scheduling
// code between them; verify on each new toolchain.
inline intptr_t compare_and_exchange(register intptr_t *address, register intptr_t old_value, register intptr_t new_value) {
#if defined(__ppc__)
// 32-bit load-reserve / store-conditional loop (lwarx / stwcx.).
register intptr_t loaded; register intptr_t tmp;
__asm__ volatile ("1: lwarx %[loaded],0,%[address]" : [loaded] "=r" (loaded) : [address] "r" (address) : "memory");
__asm__ volatile ("cmpw %[loaded],%[old_value]" : : [loaded] "r" (loaded), [old_value] "r" (old_value) : "cr0");
__asm__ volatile ("bne- 2f");
__asm__ volatile ("stwcx. %[new_value],0,%[address]" : : [new_value] "r" (new_value), [address] "r" (address) : "memory", "cr0");
__asm__ volatile ("bne- 1b");
// Mismatch path: dummy store-conditional to a scratch stack slot so the
// outstanding reservation is cleared before returning.
__asm__ volatile ("2: addi %[tmp],r1,-16" : [tmp] "=r" (tmp) : : "r1");
__asm__ volatile ("stwcx. %[tmp],0,%[tmp]" : : [tmp] "r" (tmp) : "memory");
return loaded;
#elif defined(__ppc64__)
// 64-bit variant of the same load-reserve / store-conditional loop.
register intptr_t loaded; register intptr_t tmp;
__asm__ volatile ("1: ldarx %[loaded],0,%[address]" : [loaded] "=r" (loaded) : [address] "r" (address) : "memory");
__asm__ volatile ("cmpd %[loaded],%[old_value]" : : [loaded] "r" (loaded), [old_value] "r" (old_value) : "cr0");
__asm__ volatile ("bne- 2f");
__asm__ volatile ("stdcx. %[new_value],0,%[address]" : : [new_value] "r" (new_value), [address] "r" (address) : "memory", "cr0");
__asm__ volatile ("bne- 1b");
// Mismatch path: dummy store-conditional clears the reservation.
__asm__ volatile ("2: addi %[tmp],r1,-16" : [tmp] "=r" (tmp) : : "r1");
__asm__ volatile ("stdcx. %[tmp],0,%[tmp]" : : [tmp] "r" (tmp) : "memory");
return loaded;
#elif defined(__i386__)
// lock cmpxchg: eax carries old_value in and the previous *address out.
register intptr_t loaded;
__asm__ volatile ("lock; cmpxchgl %[src], %[dst]"
: [acc] "=a" (loaded), "=m" (*address)
: [src] "r" (new_value), [dst] "m" (*address), "[acc]" (old_value)
: "memory");
return loaded;
#elif defined(__x86_64__)
// 64-bit lock cmpxchg: rax carries old_value in and the previous *address out.
register intptr_t loaded;
__asm__ volatile ("lock; cmpxchgq %[src], %[dst]"
: [acc] "=a" (loaded), "=m" (*address)
: [src] "r" (new_value), [dst] "m" (*address), "[acc]" (old_value)
: "memory");
return loaded;
#else
#error architecture unsupported
#endif
}
#if defined(__ppc__) || defined(__ppc64__)
// CPU feature bits exported by the kernel (commpage).
extern "C" int _cpu_capabilities;
// Hint the data cache to prefetch the line containing address (dcbt).
inline void touch_cache(register void *address) {
__asm__ volatile ("dcbt 0, %[address]" : : [address] "r" (address));
}
// Prefetch every cache line in [address, end).  The stride is decoded from
// _cpu_capabilities bits; NOTE(review): a zero stride would make this loop
// spin forever -- presumably the kernel always reports a nonzero cache-line
// size; verify.
inline void touch_cache_range(void *address, void *end) {
register char *cursor = (char *)address;
register intptr_t stride = (_cpu_capabilities << 3) & 0xe0;
for ( ; cursor < (char *)end; cursor += stride) {
__asm__ volatile ("dcbt 0, %[cursor]" : : [cursor] "r" (cursor));
}
}
#endif
inline void error(const char *msg, const void *address = NULL) {
if (address) malloc_printf("*** %s: agc error for object %p: %s\n", prelude(), address, msg);
else malloc_printf("*** %s: agc error: %s\n", prelude(), msg);
#if 0 && defined(DEBUG)
__builtin_trap();
#endif
}
// Allocate `size` bytes of fresh, zero-filled VM, aligned to `alignment`
// (a power of two) and tagged with the given VM_MEMORY_* label so the region
// is identifiable in vmmap.  Returns NULL on failure.
inline void *allocate_memory(usword_t size, usword_t alignment = page_size, signed label = VM_MEMORY_MALLOC) {
    vm_address_t address = 0;
#if 0
    // Disabled: remap the malloc tags onto application-specific tags to keep
    // the collector's regions distinct from libmalloc's.
    switch (label) {
    default:
    case VM_MEMORY_MALLOC: label = VM_MEMORY_APPLICATION_SPECIFIC_1; break;
    case VM_MEMORY_MALLOC_SMALL: label = VM_MEMORY_APPLICATION_SPECIFIC_1 + 1; break;
    case VM_MEMORY_MALLOC_LARGE: label = VM_MEMORY_APPLICATION_SPECIFIC_1 + 2; break;
    }
#endif
    // alignment - 1 is vm_map's alignment mask; VM_FLAGS_ANYWHERE lets the
    // kernel choose the address.
    kern_return_t err = vm_map(mach_task_self(), &address, size, alignment - 1,
                               VM_FLAGS_ANYWHERE | VM_MAKE_TAG(label), MACH_PORT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    if (err != KERN_SUCCESS) {
        malloc_printf("*** %s: Zone::Can not allocate 0x%lx bytes\n", prelude(), size);
        return NULL;
    }
#if defined(DEBUG)
    // Fix: the trace passed an integral vm_address_t to %p and an unsigned
    // long size to %d; cast/match the conversions so the output is correct.
    if (Environment::print_allocs) malloc_printf("vm_map @%p %lu\n", (void *)address, (unsigned long)size);
#endif
    return (void *)address;
}
// Return a region previously obtained from allocate_memory to the kernel.
inline void deallocate_memory(void *address, usword_t size) {
#if defined(DEBUG)
    // Fix: size is an unsigned long; the original traced it with %d.
    if (Environment::print_allocs) malloc_printf("vm_deallocate @%p %lu\n", address, (unsigned long)size);
#endif
    kern_return_t err = vm_deallocate(mach_task_self(), (vm_address_t)address, size);
    ASSERTION(err == KERN_SUCCESS);
}
// Copy size bytes from source to dest through the VM system (copy-on-write
// where the kernel can).  NOTE(review): vm_copy has page-alignment
// expectations on some kernels -- callers here pass page-multiple regions;
// verify before using for arbitrary ranges.
inline void copy_memory(void *dest, void *source, usword_t size) {
kern_return_t err = vm_copy(mach_task_self(), (vm_address_t)source, size, (vm_address_t)dest);
ASSERTION(err == KERN_SUCCESS);
}
// Discard the physical pages backing [address, address + size) while keeping
// the VM reservation; the range reads back as zero-fill when next touched.
// size must be a whole number of pages.
inline void uncommit_memory(void *address, usword_t size) {
ASSERTION(!(size % vm_page_size));
// VM_SYNC_KILLPAGES invalidates the pages without writing them anywhere.
kern_return_t err = vm_msync(mach_task_self(), (vm_address_t)address, size, VM_SYNC_KILLPAGES);
ASSERTION(err == KERN_SUCCESS);
}
// Make the page at address inaccessible so any touch faults; the vm_protect
// result is deliberately ignored (best effort).
inline void guard_page(void *address) { vm_protect(mach_task_self(), (vm_address_t)address, page_size, false, VM_PROT_NONE); }
// Restore default protections on a previously guarded page.
inline void unguard_page(void *address) { vm_protect(mach_task_self(), (vm_address_t)address, page_size, false, VM_PROT_DEFAULT); }
// Allocate size bytes (rounded up to whole pages) bracketed by an
// inaccessible guard page on each side.  Returns the usable interior
// pointer, or NULL if the underlying allocation failed.
inline void *allocate_guarded_memory(usword_t size) {
    usword_t rounded = align2(size, page_size_log2);
    void *raw = allocate_memory(rounded + 2 * page_size, page_size, VM_MEMORY_MALLOC);
    if (raw == NULL) return raw;
    guard_page(raw);
    guard_page(displace(raw, page_size + rounded));
    return displace(raw, page_size);
}
inline void deallocate_guarded_memory(void *address, usword_t size) {
usword_t needed = align2(size, page_size_log2);
deallocate_memory(displace(address, -page_size), needed + 2 * page_size);
}
// Debug hook: trap when the collector encounters the address stored in the
// global WatchPoint (set from a debugger).  Compiles to nothing in release
// builds.
inline void watchpoint(void *address) {
#if DEBUG
if (address == WatchPoint) __builtin_trap();
#endif
}
// Timer entry points, defined elsewhere; names suggest microsecond and
// nanosecond scales -- see the implementation for the authoritative units.
uint64_t micro_time();
double nano_time();
// Elapsed-time meter over mach_absolute_time().  Call reset() to mark the
// start; elapsed() converts ticks since then to nanoseconds via the shared
// timebase.  NOTE: elapsed() before any reset() measures from _start_time
// == 0, i.e. since boot.
class StopWatch {
    uint64_t _start_time;                   // mach_absolute_time() at last reset()
    mach_timebase_info_data_t &_timebase;   // shared ticks -> ns conversion factors
    static mach_timebase_info_data_t &cached_timebase_info();
public:
    StopWatch() : _start_time(0), _timebase(cached_timebase_info()) {}
    void reset() { _start_time = mach_absolute_time(); }
    uint64_t elapsed() {
        uint64_t ticks = mach_absolute_time() - _start_time;
        return (ticks * _timebase.numer) / _timebase.denom;
    }
};
// Exported hooks letting clients acquire/release the collector zone's lock
// (e.g. around fork).  Implemented elsewhere.
extern "C" void auto_gc_lock(malloc_zone_t *zone);
extern "C" void auto_gc_unlock(malloc_zone_t *zone);
// Bundles an auto_memory_reader_t callback with the task it reads from, so
// heap-introspection code can fetch (possibly remote) memory in one call.
class MemoryReader {
    task_t _task;                   // task whose memory is being examined
    auto_memory_reader_t _reader;   // callback that maps that memory locally
public:
    MemoryReader(task_t task, auto_memory_reader_t reader) : _task(task), _reader(reader) {}
    // Map size bytes at task_address into this process; NULL on failure.
    inline void *read(void *task_address, usword_t size) {
        void *local_address;
        kern_return_t err = _reader(_task, (vm_address_t)task_address, (vm_size_t)size, &local_address);
        return err ? NULL : local_address;
    }
};
// Base for objects constructed into storage obtained elsewhere (placement
// new only).  Plain `new` is a usage error: it logs, then still returns
// aux_malloc storage so the caller doesn't crash immediately.  delete is a
// no-op because the placement storage belongs to someone else.
class Preallocated {
public:
void *operator new(size_t size) { error("Must use alternate form of new operator."); return aux_malloc(size); }
void *operator new(size_t size, void *address) { return address; }
void operator delete(void *x) { }
};
class AuxAllocated {
public:
inline void *operator new(const size_t size) {
void *memory = aux_malloc(size);
if (!memory) error("Failed of allocate memory for auto internal use.");
return memory;
}
inline void operator delete(void *address) {
if (address) aux_free(address);
}
};
// Minimal C++03 STL allocator drawing from the auxiliary malloc zone, so the
// collector's own containers never allocate from the collected heap.
template <typename T> struct AuxAllocator {
    typedef T value_type;
    typedef value_type* pointer;
    typedef const value_type *const_pointer;
    typedef value_type& reference;
    typedef const value_type& const_reference;
    typedef size_t size_type;
    typedef ptrdiff_t difference_type;
    template <typename U> struct rebind { typedef AuxAllocator<U> other; };
    template <typename U> AuxAllocator(const AuxAllocator<U>&) {}
    AuxAllocator() {}
    AuxAllocator(const AuxAllocator&) {}
    ~AuxAllocator() {}
    pointer address(reference x) const { return &x; }
    // Fix: the const overload returned x (the reference itself) instead of
    // its address; it must mirror the non-const overload.
    const_pointer address(const_reference x) const {
        return &x;
    }
    // Note: storage is zeroed (aux_calloc), unlike std::allocator.
    pointer allocate(size_type n, const_pointer = 0) {
        return static_cast<pointer>(aux_calloc(n, sizeof(T)));
    }
    void deallocate(pointer p, size_type) { ::aux_free(p); }
    size_type max_size() const {
        return static_cast<size_type>(-1) / sizeof(T);
    }
    void construct(pointer p, const value_type& x) {
        new(p) value_type(x);
    }
    void destroy(pointer p) { p->~value_type(); }
    // Declared but not defined: allocators are stateless and assignment is
    // intentionally unusable.
    void operator=(const AuxAllocator&);
};
// void specialization: only the typedefs and rebind machinery containers
// need; no references or allocation for void.
template<> struct AuxAllocator<void> {
typedef void value_type;
typedef void* pointer;
typedef const void *const_pointer;
template <typename U> struct rebind { typedef AuxAllocator<U> other; };
};
// AuxAllocators are stateless, so any two instances are interchangeable:
// equality is always true, inequality always false.
template <typename T>
inline bool operator==(const AuxAllocator<T>&, const AuxAllocator<T>&) {
    return true;
}
template <typename T>
inline bool operator!=(const AuxAllocator<T>&, const AuxAllocator<T>&) {
    return false;
}
// Function objects for keying containers by raw address.
struct AuxPointerLess {
    // Order pointers by numeric address.
    bool operator()(const void *p1, const void *p2) const { return p1 < p2; }
};
struct AuxPointerEqual {
    // Pointer identity.
    bool operator()(void *p1, void *p2) const { return p1 == p2; }
};
struct AuxPointerHash {
    // The address itself serves as the hash value.
    uintptr_t operator()(void *p) const { return (uintptr_t)p; }
};
// Container shorthands used throughout the collector, all allocating from
// the aux zone.  Hash containers use the pre-TR1 __gnu_cxx extensions.
typedef std::vector<void *, AuxAllocator<void *> > PtrVector;
typedef std::map<void *, int, AuxPointerLess, AuxAllocator<std::pair<void * const, int> > > PtrIntMap;
typedef __gnu_cxx::hash_map<void *, void *, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrPtrHashMap;
// pointer -> (pointer -> pointer) association table.
typedef __gnu_cxx::hash_map<void *, PtrPtrHashMap, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrAssocHashMap;
typedef __gnu_cxx::hash_map<void *, int, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrIntHashMap;
typedef __gnu_cxx::hash_map<void *, size_t, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrSizeHashMap;
typedef __gnu_cxx::hash_set<void *, AuxPointerHash, AuxPointerEqual, AuxAllocator<void *> > PtrHashSet;
// Growable flat array of pointers whose backing store comes from a pluggable
// MemoryAllocator (VM pages, not the collected heap).  The object itself is
// aux-zone allocated via AuxAllocated.
template <typename MemoryAllocator>
class PointerArray : AuxAllocated {
    usword_t _count;            // number of live entries
    usword_t _capacity;         // entries the buffer can hold
    void **_buffer;             // backing store, _capacity slots
    MemoryAllocator _allocator; // source of the backing store
public:
    PointerArray() : _count(0), _capacity(0), _buffer(NULL) {}
    PointerArray(MemoryAllocator allocator) : _count(0), _capacity(0), _buffer(NULL), _allocator(allocator) {}
    ~PointerArray() { if (_buffer) _allocator.deallocate_memory(_buffer, _capacity * sizeof(void *)); }
    usword_t count() const { return _count; }
    void clear_count() { _count = 0; }
    void set_count(usword_t n) { _count = n; }
    void **buffer() { return _buffer; }
    usword_t size() { return _capacity * sizeof(void*); }
    // Release physical pages; the reserved address range (capacity) remains.
    void uncommit() { if (_buffer) _allocator.uncommit_memory(_buffer, _capacity * sizeof(void*)); }
    // Double the capacity (first call allocates one page's worth of slots).
    void grow() {
        if (!_buffer) {
            _capacity = page_size / sizeof(void*);
            _buffer = (void **) _allocator.allocate_memory(page_size);
            // Fix: the initial allocation was never checked, unlike the
            // doubling path below.
            if (!_buffer) {
                auto_fatal("PointerArray::grow() _capacity=%lu failed.\n", _capacity);
            }
        } else {
            vm_size_t old_size = _capacity * sizeof(void *);
            void **new_buffer = (void **) _allocator.allocate_memory(old_size * 2);
            if (!new_buffer) {
                auto_fatal("PointerArray::grow() _capacity=%lu failed.\n", _capacity * 2);
            }
            _capacity *= 2;
            _allocator.copy_memory(new_buffer, _buffer, old_size);
            _allocator.deallocate_memory(_buffer, old_size);
            _buffer = new_buffer;
        }
    }
    // Grow (by doubling) until at least `count` entries fit; existing
    // contents are preserved.
    void grow(usword_t count) {
        if (count > _capacity) {
            usword_t old_size = _capacity * sizeof(void *);
            if (_capacity == 0L) _capacity = page_size / sizeof(void *);
            while (count > _capacity) _capacity *= 2;
            void **new_buffer = (void **) _allocator.allocate_memory(_capacity * sizeof(void*));
            if (!new_buffer) {
                // Fix: the message is labeled count= but printed _capacity.
                auto_fatal("PointerArray::grow(count=%lu) failed.\n", count);
            }
            if (_buffer) {
                // auto_fatal above never returns, so new_buffer is valid here.
                if (_count) {
                    _allocator.copy_memory(new_buffer, _buffer, old_size);
                }
                _allocator.deallocate_memory(_buffer, old_size);
            }
            _buffer = new_buffer;
        }
    }
    // Append one pointer, growing as needed.
    void add(void *pointer) {
        if (_count == _capacity) grow();
        _buffer[_count++] = pointer;
    }
};
// One page worth of pointer storage, chained into a singly linked list: the
// first slot of the page holds the link, the remaining slots hold payload.
struct PointerChunk : Preallocated {
    PointerChunk *_next;    // following chunk in the chain (or NULL)
    enum { chunk_size = (Auto::page_size / sizeof(void*)) - 1 };
    void *_pointers[chunk_size];    // payload slots
    // First payload slot.
    void **pointers() { return _pointers; }
    // One past the last payload slot.
    void **limit() { return _pointers + chunk_size; }
    // Next chunk in the chain.
    PointerChunk *next() { return _next; }
};
// Walk a chunk chain, handing each chunk's occupied slice to the visitor.
// count is the total number of pointers stored across the chain; only the
// last occupied chunk is partially full.
template <class Visitor> inline void visitPointerChunks(PointerChunk *chunks, usword_t count, Visitor& visitor) {
    for (PointerChunk *cursor = chunks; cursor != NULL; ) {
        // Capture the link before visiting, in case the visitor mutates the chunk.
        PointerChunk *following = cursor->next();
        void **start = cursor->pointers();
        usword_t batch = (count > PointerChunk::chunk_size) ? (usword_t)PointerChunk::chunk_size : count;
        void **finish = start + batch;
        visitor.visitPointerChunk(start, finish);
        count -= batch;
        cursor = following;
    }
}
// Block-based variant of visitPointerChunks: invokes the block on each
// chunk's occupied [start, finish) slice.
inline void visitPointerChunksBlock(PointerChunk *chunks, usword_t count, void (^block) (void **, void**)) {
    for (PointerChunk *cursor = chunks; cursor != NULL; ) {
        // Capture the link before invoking the block.
        PointerChunk *following = cursor->next();
        void **start = cursor->pointers();
        usword_t batch = (count > PointerChunk::chunk_size) ? (usword_t)PointerChunk::chunk_size : count;
        void **finish = start + batch;
        block(start, finish);
        count -= batch;
        cursor = following;
    }
}
// Unbounded FIFO of pointers built from page-sized PointerChunks.  Chunks
// are retained after reset() and recycled by subsequent add()s, so a queue
// that is repeatedly filled and reset settles at its high-water memory use.
template <typename MemoryAllocator>
class PointerQueue : AuxAllocated {
MemoryAllocator _allocator;     // source of chunk storage
PointerChunk *_head;            // first chunk in the chain (or NULL)
PointerChunk *_tail;            // last chunk in the chain
PointerChunk *_current;         // chunk currently being filled
void **_cursor, **_limit;       // next free slot / end of _current's slots
usword_t _count;                // pointers added since the last reset()
// Advance _current to the next chunk: reuse a previously allocated chunk
// if one follows, otherwise allocate a fresh one and append it.
// NOTE(review): the freshly allocated chunk's _next is never stored; this
// works when the allocator returns zero-filled memory (as the VM-based one
// does) so _next is implicitly NULL -- verify for any non-zeroing allocator.
void next_chunk() {
if (_current == NULL) {
_current = _head;
} else {
_current = _current->_next;
}
if (_current == NULL) {
_current = (PointerChunk *)_allocator.allocate_memory(sizeof(PointerChunk));
if (_head == NULL) {
_head = _tail = _current;
} else {
_tail->_next = _current;
_tail = _current;
}
}
_cursor = _current->pointers();
_limit = _current->limit();
}
public:
PointerQueue() : _head(NULL), _tail(NULL), _current(NULL), _cursor(NULL), _limit(NULL), _count(0) {}
PointerQueue(MemoryAllocator allocator) : _allocator(allocator), _head(NULL), _tail(NULL), _current(NULL), _cursor(NULL), _limit(NULL), _count(0) {}
// Frees every chunk ever allocated, including recycled ones.
~PointerQueue() {
PointerChunk *chunk = _head;
while (chunk != NULL) {
PointerChunk *next = chunk->_next;
_allocator.deallocate_memory(chunk, sizeof(PointerChunk));
chunk = next;
}
}
// Append a pointer, moving to (or allocating) another chunk when full.
void add(void *pointer) {
if (_cursor == _limit) next_chunk();
*_cursor++ = pointer;
++_count;
}
// Logically empty the queue; chunks are kept for reuse (see class comment).
void reset() {
_current = NULL;
_cursor = _limit = NULL;
_count = 0;
}
// Head of the chunk chain, for visitPointerChunks-style traversal.
PointerChunk *chunks() { return _head; }
usword_t count() { return _count; }
// Total bytes of chunk storage currently held (one page per chunk).
usword_t size() {
usword_t size = 0;
PointerChunk *chunk = _head;
while (chunk != NULL) {
size += Auto::page_size;
chunk = chunk->_next;
}
return size;
}
};
// MemoryAllocator policy forwarding to the namespace-level VM primitives;
// gives PointerArray/PointerQueue page-granular, zero-filled storage.
class VMMemoryAllocator {
public:
    inline void *allocate_memory(usword_t size) { return Auto::allocate_memory(size); }
    inline void deallocate_memory(void *address, usword_t size) { Auto::deallocate_memory(address, size); }
    inline void uncommit_memory(void *address, usword_t size) { Auto::uncommit_memory(address, size); }
    inline void copy_memory(void *dest, void *source, usword_t size) { Auto::copy_memory(dest, source, size); }
};
};
#endif // __AUTO_DEFS__