#include "AutoAdmin.h"
#include "AutoBitmap.h"
#include "AutoBlockIterator.h"
#include "AutoCollector.h"
#include "AutoConfiguration.h"
#include "AutoDefs.h"
#include "AutoEnvironment.h"
#include "AutoLarge.h"
#include "AutoLock.h"
#include "AutoRange.h"
#include "AutoRegion.h"
#include "AutoStatistics.h"
#include "AutoSubzone.h"
#include "AutoMemoryScanner.h"
#include "AutoThread.h"
#include "AutoWriteBarrierIterator.h"
#include "AutoThreadLocalCollector.h"
#include "AutoZone.h"
#include "auto_weak.h"
#include "auto_trace.h"
#include "auto_dtrace.h"
#include <mach-o/dyld.h>
#include <mach-o/ldsyms.h>
#include <sys/mman.h>
// Cursor state for an in-progress enumeration of a garbage list; handed to
// the control.batch_invalidate callback (see foreach_block_do and
// Zone::invalidate_garbage below).
struct auto_zone_cursor {
    auto_zone_t *zone;              // zone that owns the garbage blocks
    size_t garbage_count;           // total number of entries in `garbage`
    const vm_address_t *garbage;    // addresses of the garbage blocks
    volatile unsigned index;        // next entry to visit
    size_t block_count;             // running count of object blocks invalidated
    size_t byte_count;              // running total of their sizes in bytes
};
namespace Auto {
#if defined(DEBUG)
#warning DEBUG is set
#endif
// First zone ever constructed in this process (set once in the constructor).
Zone *Zone::_first_zone = NULL;
// Monotonic count of zones created; used to hand out per-zone TSD keys.
volatile int32_t Zone::_zone_count = 0;
// One-time, process-wide setup (invoked under dispatch_once from the Zone
// constructor): initializes Environment flags and captures the default
// malloc zone for auxiliary allocations.
void Zone::setup_shared() {
    Environment::initialize();
    // Capture the default malloc zone only if no GC zone exists yet;
    // presumably avoids recording the collected zone itself as the
    // auxiliary allocator — TODO confirm.
    if (!aux_zone && !Zone::zone()) {
        aux_zone = malloc_default_zone();
    }
}
// Reserve one of the framework-reserved pthread TSD keys
// (__PTK_FRAMEWORK_GC_KEY0 .. __PTK_FRAMEWORK_GC_KEY9) for this zone's
// registered-thread storage. Returns 0 when all ten keys are exhausted.
// NOTE(review): _zone_count is incremented even on the failure path, so
// the counter keeps advancing past the key range — looks intentional
// (monotonic zone count), but confirm.
pthread_key_t Zone::allocate_thread_key() {
    pthread_key_t key = __sync_fetch_and_add(&_zone_count, 1) + __PTK_FRAMEWORK_GC_KEY0;
    if (key <= __PTK_FRAMEWORK_GC_KEY9)
        return key;
    return 0;
}
// Construct a collected zone. `thread_registration_key` is the TSD key
// (from allocate_thread_key()) under which each registered Thread is
// stored for this zone. Carves the zone's bookkeeping bitmaps out of the
// memory immediately following the Zone object itself.
Zone::Zone(pthread_key_t thread_registration_key)
: _registered_threads_key(thread_registration_key), _page_allocator(_stats), _garbage_list(_page_allocator)
{
    ASSERTION(page_size == vm_page_size);
    // NOTE(review): lookup result is discarded; presumably present only to
    // force full symbol binding of this image — confirm.
    NSLookupSymbolInImage((const mach_header*)&_mh_dylib_header, "___", NSLOOKUPSYMBOLINIMAGE_OPTION_BIND_FULLY);
    // Run process-wide setup exactly once, no matter how many zones exist.
    static dispatch_once_t is_auto_initialized = 0;
    dispatch_once(&is_auto_initialized, ^{ setup_shared(); });
    // `next` walks the raw storage following the Zone header; each bitmap
    // is placed there in turn.
    void *next = displace(this, admin_offset());
    _registered_threads = NULL;
    pthread_key_init_np(_registered_threads_key, destroy_registered_thread);
    // The registered-threads mutex is recursive: paths below may re-enter
    // while already holding it.
    pthread_mutexattr_t mutex_attr;
    pthread_mutexattr_init(&mutex_attr);
    pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&_registered_threads_mutex, &mutex_attr);
    pthread_mutexattr_destroy(&mutex_attr);
    pthread_mutex_init(&_mark_bits_mutex, NULL);
    _enlivening_enabled = false;
    _enlivening_complete = false;
    // Membership bitmaps: which quanta hold subzone memory / large blocks.
    _in_subzone.initialize(subzone_quantum_max, next);
    next = displace(next, Bitmap::bytes_needed(subzone_quantum_max));
    _in_large.initialize(allocate_quantum_large_max, next);
    next = displace(next, Bitmap::bytes_needed(allocate_quantum_large_max));
#if UseArena
    // Arena build: one contiguous reservation covers all GC memory; large
    // allocations are tracked in _large_bits.
    _large_bits.initialize(allocate_quantum_large_max, next);
    _large_bits_lock = 0;
    next = displace(next, Bitmap::bytes_needed(allocate_quantum_large_max));
    _arena = allocate_memory(1ul << arena_size_log2, 1ul << arena_size_log2);
    if (!_arena) {
        auto_fatal("can't allocate arena for GC\n");
    }
    _large_start = NULL;
    _coverage.set_range(_arena, 1ul << arena_size_log2);
#else
    // Non-arena build: coverage starts inverted/empty and expands as
    // regions and large blocks are allocated.
    _coverage.set_range((void *)~0, (void *)0);
#endif
    _small_admin.initialize(this, allocate_quantum_small_log2);
    _medium_admin.initialize(this, allocate_quantum_medium_log2);
    _large_list = NULL;
    _large_lock = 0;
    _roots_lock = 0;
    _datasegments_lock = 0;
    _zombies_lock = 0;
    _region_list = NULL;
    _region_lock = 0;
    _retains_lock = 0;
    _coverage_lock = 0;
    _repair_write_barrier = false;
#if GLOBAL_ENLIVENING_QUEUE
    _needs_enlivening.state = false;
    _needs_enlivening.lock = 0;
#endif
    _state = idle;
    // Account the zone's own bookkeeping as admin overhead.
    _stats.reset();
    usword_t data_size = bytes_needed();
    _stats.add_admin(data_size);
    _allocation_threshold = 0;
    // Pre-allocate the first region so allocation never starts empty.
    allocate_region();
    if (_first_zone == NULL)
        _first_zone = this;
}
// Tear down the zone: release every large block and every region, then
// verify that no threads are still registered.
Zone::~Zone() {
    // Release all large blocks; grab the successor before deallocating.
    for (Large *large = _large_list; large; ) {
        Large *next = large->next();
        large->deallocate(this);
        large = next;
    }
    // Release all regions, mirroring the Large loop above. The previous
    // code also advanced the cursor in the for-increment (after the body
    // had already stepped to `next`), which skipped — and leaked — every
    // other region.
    for (Region *region = _region_list; region != NULL; ) {
        Region *next = region->next();
        delete region;
        region = next;
    }
    _region_list = NULL;
    // Threads must unregister before the zone dies; report stragglers.
    if (_registered_threads != NULL)
        auto_error(this, "~Zone(): registered threads list not empty\n", NULL);
}
#if UseArena
// Arena build: allocate `size` bytes of large-block memory from the arena
// using a first-fit scan of _large_bits (one bit per large quantum).
// Returns NULL when no contiguous run is available.
void *Zone::arena_allocate_large(usword_t size) {
    // Round the request up to a whole number of large quanta.
    usword_t seeksize = (size + allocate_quantum_large - 1) & ~(allocate_quantum_large-1);
    usword_t nbits = seeksize >> allocate_quantum_large_log2;
    // Scannable range: from the start of large space to the arena's end.
    usword_t start = 0;
    usword_t end = ((1ul << arena_size_log2) - ((uintptr_t)_large_start - (uintptr_t)_arena)) >> allocate_quantum_large_log2;
    if (nbits > (end - start)) {
        return NULL;
    }
    // Clamp so that a hit at `end` still leaves room for `nbits` bits.
    end -= nbits; SpinLock lock(&_large_bits_lock);
    while (start <= end) {
        // First-fit: claim the first clear run of nbits quanta.
        if (_large_bits.bits_are_clear(start, nbits)) {
            _large_bits.set_bits(start, nbits);
            void *address = displace(_large_start, start << allocate_quantum_large_log2);
            // Tell the VM we are about to reuse previously MADV_FREE'd pages.
            madvise(address, seeksize, MADV_FREE_REUSE);
            return address;
        }
        start += 1;
    }
    return NULL;
}
// Arena build: hand out the region portion of the arena. Only a single
// region is supported — _large_start marks where region space ends and
// large-block space begins, so a second call fails.
void *Zone::arena_allocate_region(usword_t newsize) {
    if (_large_start) return NULL;
    // Round the region up to a whole number of subzone quanta.
    usword_t roundedsize = (newsize + subzone_quantum - 1) & ~(subzone_quantum-1);
    _large_start = displace(_arena, roundedsize);
    return _arena;
}
// Arena build: return a large-block range to the arena by clearing its
// bits and letting the VM reclaim the pages lazily.
void Zone::arena_deallocate(void *address, size_t size) {
    usword_t seeksize = (size + allocate_quantum_large - 1) & ~(allocate_quantum_large-1);
    usword_t nbits = seeksize >> allocate_quantum_large_log2;
    usword_t start = ((char *)address - (char *)_large_start) >> allocate_quantum_large_log2;
    SpinLock lock(&_large_bits_lock);
    _large_bits.clear_bits(start, nbits);
    // Pages stay mapped but may be reclaimed by the kernel under pressure.
    madvise(address, seeksize, MADV_FREE_REUSABLE);
}
#else
// Non-arena build: allocate large-block memory directly from the VM.
void *Zone::arena_allocate_large(usword_t size) {
    return allocate_memory(size, allocate_quantum_large, VM_MEMORY_MALLOC_LARGE);
}
// Non-arena build: return large-block memory directly to the VM.
void Zone::arena_deallocate(void *address, size_t size) {
    deallocate_memory(address, size);
}
#endif
// Allocate (or reuse) a region for subzone memory. Returns the head
// region when it still has free subzones, otherwise creates a new region,
// expands the zone's coverage, and links it at the head of _region_list.
// Returns NULL when a new region cannot be created.
Region *Zone::allocate_region() {
    SpinLock lock(&_region_lock);
    // Fast path: the newest region still has room.
    if (_region_list && _region_list->subzones_remaining() != 0) return _region_list;
    Region *region = Region::new_region(this);
    if (region) {
        {
            SpinLock lock(&_coverage_lock);
            _coverage.expand_range(*region);
        }
        region->set_next(_region_list);
        _region_list = region;
        // The very first region also donates space for the scan stack.
        if (!_scan_stack.is_allocated()) {
            _scan_stack.set_range(region->scan_space());
        }
    }
    return region;
}
// Allocate a large block. On success, `size` is updated to the actual
// (quantized) block size, the block is registered in _in_large, enlivened
// if a collection is in progress, zeroed when needed, and accounted in
// the statistics. Returns NULL on failure.
void *Zone::allocate_large(Thread &thread, usword_t &size, const unsigned layout, bool clear, bool refcount_is_one) {
    Large *large = Large::allocate(this, size, layout, refcount_is_one);
    void *address;
    {
        // Registration and enlivening happen atomically under _large_lock
        // so a concurrent collector cannot miss the new block.
        SpinLock lock(&_large_lock);
        EnliveningHelper<ConditionBarrier> barrier(thread);
        if (large) {
            address = large->address();
            _in_large.set_bit(Large::quantum_index(address));
            if (barrier) barrier.enliven_block(address);
            large->add(_large_list);
        } else {
            return NULL;
        }
    }
    size = large->size();
#if UseArena
    // Arena memory is recycled, so scanned (or explicitly cleared) blocks
    // must be zeroed here. Non-arena blocks come straight from the VM and
    // are presumably already zero-filled — TODO confirm.
    if (clear || !(layout & AUTO_UNSCANNED)) {
        bzero(address, size);
    }
#endif
    {
        SpinLock lock(&_coverage_lock);
        Range large_range(address, size);
        _coverage.expand_range(large_range);
    }
    _stats.add_count(1);
    _stats.add_size(size);
    _stats.add_dirty(size);
    _stats.add_allocated(size);
    // Feed the collection-trigger heuristic.
    adjust_threshold_counter(size);
    return address;
}
// Release a large block: back out its statistics, unlink it from the
// large list and membership bitmap under _large_lock, then free it.
void Zone::deallocate_large(void *block) {
    Large *large = Large::large(block);
    usword_t size = large->size();
    _stats.add_count(-1);
    _stats.add_size(-size); _stats.add_allocated(-size); _stats.add_dirty(-size);
    SpinLock lock(&_large_lock);
    large->remove(_large_list);
    _in_large.clear_bit(Large::quantum_index(block));
    large->deallocate(this);
}
// Non-blocking probe: returns true when `lock` is currently held
// (i.e. a try-acquire fails). Any lock acquired by the probe is
// released when the probe object goes out of scope.
static inline bool locked(spin_lock_t *lock) {
    TrySpinLock probe(lock);
    const bool busy = !probe;
    return busy;
}
// Non-blocking probe: returns true when the mutex is currently held
// (a trylock fails). A successful trylock is released at scope exit.
static inline bool locked(pthread_mutex_t *lock) {
    TryMutex probe(lock);
    const bool busy = !probe;
    return busy;
}
// Report whether ANY of the zone's locks are currently held. Used
// (presumably by fork/malloc introspection machinery — TODO confirm) to
// decide whether the zone is in a consistent state.
bool Zone::is_locked() {
    // Probe every fixed zone lock first.
    bool result = (locked(_small_admin.lock()) || locked(_medium_admin.lock()) ||
        locked(&weak_refs_table_lock) || locked(&_large_lock) || locked(&_roots_lock) ||
        locked(&_datasegments_lock) || locked(&_zombies_lock) || locked(&_region_lock) ||
        locked(&_retains_lock) || locked(&_coverage_lock) || locked(&_associations_lock) ||
#if UseArena
        locked(&_large_bits_lock) ||
#endif
        locked(&_registered_threads_mutex));
    if (!result) {
        // Also probe the current thread's enlivening lock...
        Thread *thread = current_thread();
        if (thread != NULL) {
            LockedBoolean &needs_enlivening = thread->needs_enlivening();
            if (locked(&needs_enlivening.lock))
                return true;
        }
        // ...and each region's subzone lock.
        for (Region *region = _region_list; region != NULL; region = region->next()) {
            if (locked(region->subzone_lock())) {
                return true;
            }
        }
    }
    return result;
}
// Grow `admin` by one subzone, notifying the client via will_grow. If the
// current region is exhausted, try to allocate a fresh region. Returns
// false only when no new region can be created.
bool Zone::add_subzone(Admin *admin) {
    control.will_grow((auto_zone_t *)this, AUTO_HEAP_SUBZONE_EXHAUSTED);
    if (!_region_list->add_subzone(admin)) {
        control.will_grow((auto_zone_t *)this, AUTO_HEAP_REGION_EXHAUSTED);
        if (allocate_region() == NULL) return false;
    }
    return true;
}
// Central block allocator. Dispatches on size class (small cached /
// small / medium / large), retrying after growing the matching admin by a
// subzone when the allocator comes up empty. Updates statistics, kicks
// off a ratio collection when the threshold is reached, and zeroes the
// block when `clear` is requested (large blocks are cleared in
// allocate_large). Returns NULL when memory is exhausted.
void *Zone::block_allocate(Thread &thread, const size_t size, const unsigned layout, bool clear, bool refcount_is_one) {
    void *block;
    usword_t allocated_size = size;
    bool did_grow = false, is_large = false;
    // Zero-sized requests still get a minimal block.
    if (!allocated_size) allocated_size = 1;
    if (allocated_size <= (allocate_quantum_small * max_cached_small_multiple)) {
        // Smallest class: give the thread-local collector a chance to
        // recycle local garbage before allocating from the cache.
        const bool cannotFinalizeNow = false;
        if (ThreadLocalCollector::should_collect(this, thread, cannotFinalizeNow)) {
            ThreadLocalCollector tlc(this, (void*)auto_get_sp(), thread);
            tlc.collect(cannotFinalizeNow);
        }
        do {
            block = _small_admin.thread_cache_allocate(thread, allocated_size, layout, refcount_is_one, did_grow);
        } while (!block && add_subzone(&_small_admin));
    } else if (allocated_size < allocate_quantum_medium) {
        do {
            block = _small_admin.find_allocation(thread, allocated_size, layout, refcount_is_one, did_grow);
        } while (!block && add_subzone(&_small_admin));
    } else if (allocated_size < allocate_quantum_large) {
        do {
            block = _medium_admin.find_allocation(thread, allocated_size, layout, refcount_is_one, did_grow);
        } while (!block && add_subzone(&_medium_admin));
    } else {
        // allocate_large handles clearing and its own enlivening.
        block = allocate_large(thread, allocated_size, layout, clear, refcount_is_one);
        is_large = true;
    }
    if (block == NULL) return NULL;
    _stats.add_count(1);
    _stats.add_size(allocated_size);
    adjust_threshold_counter(allocated_size);
    // Trigger an asynchronous ratio collection once enough bytes have
    // been allocated (only meaningful once multithreading has started).
    if (threshold_reached() && multithreaded) {
        auto_collect((auto_zone_t *)this, AUTO_COLLECT_RATIO_COLLECTION, NULL);
    }
    if (did_grow) {
        control.will_grow((auto_zone_t *)this, AUTO_HEAP_HOLES_EXHAUSTED);
    }
    if (is_large) return block;
    if (clear) {
        // Clear small blocks word-by-word via deliberate case fall-through
        // (cheaper than bzero for <= 12 words); larger blocks use bzero.
        void **end = (void **)displace(block, allocated_size);
        switch (allocated_size/sizeof(void *)) {
        case 12: end[-12] = NULL;
        case 11: end[-11] = NULL;
        case 10: end[-10] = NULL;
        case 9: end[-9] = NULL;
        case 8: end[-8] = NULL;
        case 7: end[-7] = NULL;
        case 6: end[-6] = NULL;
        case 5: end[-5] = NULL;
        case 4: end[-4] = NULL;
        case 3: end[-3] = NULL;
        case 2: end[-2] = NULL;
        case 1: end[-1] = NULL;
        case 0: break;
        default:
            bzero(block, allocated_size);
            break;
        }
    }
#if RECORD_REFCOUNT_STACKS
    if (AUTO_RECORD_REFCOUNT_STACKS) {
        // Fixed: previously referenced undefined identifier `ptr`, which
        // could not compile when RECORD_REFCOUNT_STACKS is enabled.
        auto_record_refcount_stack(this, block, 0);
    }
#endif
#if LOG_TIMINGS
    // Log whenever total allocation crosses a LOG_ALLOCATION_THRESHOLD boundary.
    size_t allocated = _stats.size();
    if ((allocated & ~(LOG_ALLOCATION_THRESHOLD-1)) != ((allocated - size) & ~(LOG_ALLOCATION_THRESHOLD-1)))
        log_allocation_threshold(auto_date_now(), _stats.size(), _stats.allocated());
#endif
    return block;
}
// Explicitly free a block (auto_zone free path). Associations and weak
// references are severed first. Subzone blocks are freed immediately only
// when unscanned and no enlivening is in progress; otherwise they are
// demoted to unscanned and left for the collector. Large blocks are
// demoted and released asynchronously on the collection queue.
void Zone::block_deallocate(void *block) {
    erase_associations(block);
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        // Interior pointers are ignored. The admin lock covers both the
        // refcount update and the possible immediate deallocation.
        if (!subzone->block_is_start(block)) return; SpinLock adminLock(subzone->admin()->lock());
        dec_refcount_small_medium(subzone, block);
        int layout = subzone->layout(block);
        if (layout & AUTO_OBJECT)
            erase_weak(block);
        // Unscanned blocks can be freed right away unless the collector's
        // enlivening window is open (it might still visit the block).
        if (((layout & AUTO_UNSCANNED) == AUTO_UNSCANNED) && !_enlivening_enabled) {
            subzone->admin()->deallocate_no_lock(block);
        }
        else {
            // Scanned (or enlivening-window) blocks: stop scanning them
            // and let the next collection reclaim the storage.
            subzone->set_layout(block, AUTO_MEMORY_UNSCANNED);
        }
    } else if (in_large_memory(block) && Large::is_start(block)) {
        Large *large = Large::large(block);
        int layout = large->layout();
        if (layout & AUTO_OBJECT)
            erase_weak(block);
        large->set_layout(AUTO_MEMORY_UNSCANNED);
#if USE_DISPATCH_QUEUE
        // Defer the actual release to the collector's dispatch queue.
        if (collection_queue) {
            Zone *zone = this;
            dispatch_async(collection_queue, ^{ zone->deallocate_large(block); });
        }
#else
        block_decrement_refcount(block);
#endif
    } else {
        error("Deallocating a non-block", block);
    }
}
// Find the start of the large block containing `address`, or NULL if the
// address is not inside any large block. Works for interior pointers by
// scanning _in_large backwards for the nearest block-start quantum.
void *Zone::block_start_large(void *address) {
    if (_coverage.in_range(address)) {
        SpinLock lock(&_large_lock); usword_t q = Large::quantum_index(address);
        if (!_in_large.bit(q)) {
            // Interior pointer: back up to the preceding block start.
            q = _in_large.previous_set(q);
            if (q == not_found) return NULL;
        }
#if UseArena
        Large *large = Large::quantum_large(q, _arena);
#else
        Large *large = Large::quantum_large(q, (void *)0);
#endif
        // The preceding block may end before `address`; verify containment.
        if (!large->range().in_range(address)) return NULL;
        return large->address();
    }
    return NULL;
}
void *Zone::block_start(void *address) {
if (in_subzone_memory(address)) {
Subzone *subzone = Subzone::subzone(address);
return subzone->block_start(address);
} else {
return block_start_large(address);
}
}
// Size in bytes of the block starting at `block`; 0 when `block` is not
// the start of an allocated block.
usword_t Zone::block_size(void *block) {
    if (in_subzone_memory(block)) {
        Subzone *sz = Subzone::subzone(block);
        return sz->block_is_start(block) ? sz->size(block) : 0;
    }
    if (in_large_memory(block) && Large::is_start(block))
        return Large::size(block);
    return 0;
}
int Zone::block_layout(void *block) {
if (in_subzone_memory(block)) {
Subzone *subzone = Subzone::subzone(block);
if (!subzone->block_is_start(block)) return AUTO_TYPE_UNKNOWN; return subzone->layout(block);
} else if (in_large_memory(block) && Large::is_start(block)) {
return Large::layout(block);
}
return AUTO_TYPE_UNKNOWN;
}
// Change the layout of an existing block. Subzone layout updates take the
// owning admin's lock; invalid block starts are silently ignored.
void Zone::block_set_layout(void *block, int layout) {
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        if (!subzone->block_is_start(block)) return; SpinLock lock(subzone->admin()->lock());
        subzone->set_layout(block, layout);
    } else if (in_large_memory(block) && Large::is_start(block)) {
        Large::set_layout(block, layout);
    }
}
// Effective reference count of a subzone block. The per-block refcount
// field saturates at 3; counts >= 4 overflow into the _retains side
// table (guarded by _retains_lock), so a stored value of 3 means "check
// the side table".
int Zone::get_refcount_small_medium(Subzone *subzone, void *block) {
    int refcount = subzone->refcount(block);
    if (refcount == 3) {
        SpinLock lock(&_retains_lock);
        PtrIntHashMap::iterator retain_iter = _retains.find(block);
        if (retain_iter != _retains.end() && retain_iter->first == block) {
            refcount = retain_iter->second;
        }
    }
    return refcount;
}
// Increment the refcount of a subzone block, returning the new count.
// Counts 0..2 live in the per-quantum field; at saturation (3) the true
// count moves to the _retains side table (first overflow stores 4).
int Zone::inc_refcount_small_medium(Subzone *subzone, void *block) {
    usword_t q = subzone->quantum_index(block);
    int refcount = subzone->refcount(q);
    if (refcount == 3) {
        // Saturated: bump (or create) the side-table entry.
        SpinLock lock(&_retains_lock);
        PtrIntHashMap::iterator retain_iter = _retains.find(block);
        if (retain_iter != _retains.end() && retain_iter->first == block) {
            refcount = ++retain_iter->second;
        } else {
            refcount = (_retains[block] = 4);
        }
    } else {
        if (refcount == 0) {
            // 0 -> 1 transition: the block becomes rooted, so it must no
            // longer be considered thread-local.
            Thread &thread = registered_thread();
            thread.block_escaped(this, subzone, block);
        }
        subzone->incr_refcount(q);
        ++refcount;
    }
    return refcount;
}
// Decrement the refcount of a subzone block, returning the new count.
// Mirrors inc_refcount_small_medium: a stored 3 means the true count may
// be in the _retains side table. Reports underflow via
// auto_refcount_underflow_error and returns -1.
int Zone::dec_refcount_small_medium(Subzone *subzone, void *block) {
    usword_t q = subzone->quantum_index(block);
    int refcount = subzone->refcount(q);
    if (refcount == 3) {
        SpinLock lock(&_retains_lock);
        PtrIntHashMap::iterator retain_iter = _retains.find(block);
        if (retain_iter != _retains.end() && retain_iter->first == block) {
            if (--retain_iter->second == 3) {
                // Dropped back to the saturation value: the side-table
                // entry is no longer needed.
                _retains.erase(retain_iter);
                return 3;
            } else {
                return retain_iter->second;
            }
        } else {
            // Field reads 3 but no side-table entry: the count really is
            // 3, so decrement the field itself to 2.
            subzone->decr_refcount(q);
            return 2;
        }
    } else if (refcount > 0) {
        subzone->decr_refcount(q);
        return refcount - 1;
    }
    // Count was already 0: caller over-released the block.
    malloc_printf("reference count underflow for %p, break on auto_refcount_underflow_error to debug.\n", block);
    auto_refcount_underflow_error(block);
    return -1;
}
// Return the effective reference count of `block`, or 0 when `block` is
// not the start of an allocated block.
int Zone::block_refcount(void *block) {
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        if (!subzone->block_is_start(block)) return 0;
        // Reuse the subzone computed above (was redundantly recomputed
        // via a second Subzone::subzone(block) call).
        return get_refcount_small_medium(subzone, block);
    } else if (in_large_memory(block) && Large::is_start(block)) {
        SpinLock lock(&_large_lock);
        return Large::refcount(block);
    }
    return 0;
}
#if 0
// Disabled (#if 0) manual test scaffolding: exercises the inc/dec
// refcount paths across the inline-field/side-table boundary, maturing
// the block between rounds. Kept for debugging only.
void Zone::testRefcounting(void *block) {
    for (int j = 0; j < 7; ++j) {
        printf("\nloop start refcount is %d for %p\n", block_refcount(block), block);
        for (int i = 0; i < 5; ++i) {
            block_increment_refcount(block);
            printf("after increment, it now has refcount %d\n", block_refcount(block));
        }
        for (int i = 0; i < 5; ++i) {
            block_decrement_refcount(block);
            printf("after decrement, it now has refcount %d\n", block_refcount(block));
        }
        for (int i = 0; i < 5; ++i) {
            block_increment_refcount(block);
            printf("after increment, it now has refcount %d\n", block_refcount(block));
        }
        for (int i = 0; i < 5; ++i) {
            block_decrement_refcount(block);
            printf("after decrement, it now has refcount %d\n", block_refcount(block));
        }
        printf("maturing block...\n");
        Subzone::subzone(block)->mature(block);
    }
}
#endif
// Public retain: increment the block's reference count and return the new
// value (0 for invalid blocks). On the 0 -> 1 transition the block is
// enlivened if a collection's enlivening window is open and the block is
// not already marked, so the collector cannot miss a newly-rooted block.
int Zone::block_increment_refcount(void *block) {
    int refcount = 0;
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        if (!subzone->block_is_start(block)) return 0; SpinLock lock(subzone->admin()->lock());
        refcount = inc_refcount_small_medium(subzone, block);
        if (refcount == 1) {
            EnliveningHelper<ConditionBarrier> barrier(registered_thread());
            if (barrier && !block_is_marked(block)) barrier.enliven_block(block);
        }
    } else if (in_large_memory(block) && Large::is_start(block)) {
        SpinLock lock(&_large_lock);
        refcount = Large::refcount(block) + 1;
        Large::set_refcount(block, refcount);
        if (refcount == 1) {
            EnliveningHelper<ConditionBarrier> barrier(registered_thread());
            if (barrier && !block_is_marked(block)) barrier.enliven_block(block);
        }
    }
    return refcount;
}
// Public release: decrement the block's reference count and return the
// new value (0 for invalid blocks). Underflow on large blocks is reported
// via auto_refcount_underflow_error; subzone underflow is handled inside
// dec_refcount_small_medium.
int Zone::block_decrement_refcount(void *block) {
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        if (!subzone->block_is_start(block)) return 0; SpinLock lock(subzone->admin()->lock());
        return dec_refcount_small_medium(subzone, block);
    } else if (in_large_memory(block) && Large::is_start(block)) {
        SpinLock lock(&_large_lock);
        int refcount = Large::refcount(block);
        if (refcount <= 0) {
            malloc_printf("reference count underflow for %p, break on auto_refcount_underflow_error to debug\n", block);
            auto_refcount_underflow_error(block);
        }
        else {
            refcount = refcount - 1;
            Large::set_refcount(block, refcount);
        }
        return refcount;
    }
    return 0;
}
// Fetch both refcount and layout of a block in one locked operation.
// NOTE(review): when `block` is not a valid block start, *refcount and
// *layout are left untouched — callers apparently must pre-initialize
// them; confirm at call sites.
void Zone::block_refcount_and_layout(void *block, int *refcount, int *layout) {
    if (in_subzone_memory(block)) {
        Subzone *subzone = Subzone::subzone(block);
        if (!subzone->block_is_start(block)) return; SpinLock lock(subzone->admin()->lock());
        *refcount = get_refcount_small_medium(subzone, block);
        *layout = subzone->layout(block);
    } else if (in_large_memory(block) && Large::is_start(block)) {
        SpinLock lock(&_large_lock);
        Large *large = Large::large(block);
        *refcount = large->refcount();
        *layout = large->layout();
    }
}
// Store (or remove) an associative reference: (block, key) -> value.
// A NULL `value` removes the existing association for `key`, if any.
void Zone::set_associative_ref(void *block, void *key, void *value) {
    if (value) {
        // Both block and value must escape thread-local status before
        // being published in the (globally scanned) association table.
        Thread &thread = registered_thread();
        thread.block_escaped(this, NULL, value);
        thread.block_escaped(this, NULL, block);
        // Enlivening barrier is constructed before taking the table lock;
        // the store and enliven then happen atomically w.r.t. scanning.
        EnliveningHelper<UnconditionalBarrier> barrier(thread);
        SpinLock lock(&_associations_lock);
        AssocationsHashMap::iterator i = _associations.find(block);
        ObjectAssocationHashMap* refs = (i != _associations.end() ? i->second : NULL);
        if (refs == NULL) {
            refs = new ObjectAssocationHashMap();
            _associations[block] = refs;
        }
        (*refs)[key] = value;
        if (barrier) barrier.enliven_block(value);
    } else {
        // Removal path: erase just this key's entry (the per-block table
        // itself is left in place even if it becomes empty).
        SpinLock lock(&_associations_lock);
        AssocationsHashMap::iterator i = _associations.find(block);
        if (i != _associations.end()) {
            ObjectAssocationHashMap *refs = i->second;
            ObjectAssocationHashMap::iterator j = refs->find(key);
            if (j != refs->end()) {
                refs->erase(j);
            }
        }
    }
}
void *Zone::get_associative_ref(void *block, void *key) {
SpinLock lock(&_associations_lock);
AssocationsHashMap::iterator i = _associations.find(block);
if (i != _associations.end()) {
ObjectAssocationHashMap *refs = i->second;
ObjectAssocationHashMap::iterator j = refs->find(key);
if (j != refs->end()) return j->second;
}
return NULL;
}
inline void Zone::erase_associations_internal(void *block) {
AssocationsHashMap::iterator i = _associations.find(block);
if (i != _associations.end()) {
ObjectAssocationHashMap *refs = i->second;
_associations.erase(i);
delete refs;
}
}
// Public wrapper: remove all associations for `block` under the lock.
void Zone::erase_associations(void *block) {
    SpinLock lock(&_associations_lock);
    erase_associations_internal(block);
}
// Collector phase: scan associative references. For each block whose
// associations should be treated as live, feed its (key, value) pairs to
// the scanner, then drain the pending queue while still holding the
// table lock. _scanning_associations signals other code that this phase
// is active.
void Zone::scan_associations(MemoryScanner &scanner) {
    SpinLock lock(&_associations_lock);
    _scanning_associations = true;
    for (AssocationsHashMap::iterator i = _associations.begin(); i != _associations.end(); i++) {
        void *address = i->first;
        if (associations_should_be_marked(address)) {
            ObjectAssocationHashMap *refs = i->second;
            for (ObjectAssocationHashMap::iterator j = refs->begin(); j != refs->end(); j++) {
                scanner.associate_block((void**)address, j->first, j->second);
            }
        }
    }
    // Newly-reachable blocks may themselves have associations; keep
    // scanning until no pending work remains.
    scanner.scan_pending_until_done();
    _scanning_associations = false;
}
// Queue all of `block`'s associated values for scanning. Caller is
// presumed to hold _associations_lock (called during association
// scanning) — confirm at call sites.
void Zone::pend_associations(void *block, MemoryScanner &scanner) {
    AssocationsHashMap::iterator i = _associations.find(block);
    if (i != _associations.end()) {
        ObjectAssocationHashMap *refs = i->second;
        for (ObjectAssocationHashMap::iterator j = refs->begin(); j != refs->end(); j++) {
            scanner.associate_block((void**)block, j->first, j->second);
        }
    }
}
// Remove associations for every block whose address lies in `r` (e.g. an
// unloaded range). Collect the keys first, then erase, to avoid mutating
// the hash map while iterating it.
void Zone::erase_associations_in_range(const Range &r) {
    SpinLock lock(&_associations_lock);
    PtrVector associationsToRemove;
    for (AssocationsHashMap::iterator i = _associations.begin(); i != _associations.end(); i++) {
        if (r.in_range(i->first)) associationsToRemove.push_back(i->first);
    }
    for (PtrVector::iterator i = associationsToRemove.begin(); i != associationsToRemove.end(); i++) {
        erase_associations_internal(*i);
    }
}
// Store `value` at `address` with full write-barrier semantics. Returns
// true when the zone recognized and handled the store (subzone memory,
// the thread's own stack, or a large block); false tells the caller the
// destination is not GC memory (value still escapes thread-local status).
bool Zone::set_write_barrier(Thread &thread, void *address, void *value) {
    if (in_subzone_memory(address)) {
        Subzone *subzone = Subzone::subzone(address);
        // Maintain thread-local graph, enliven if a collection is in its
        // enlivening window, then store and dirty the card — in that order.
        thread.track_local_assignment(this, address, value);
        EnliveningHelper<UnconditionalBarrier> barrier(thread);
        if (barrier && !block_is_marked(value)) barrier.enliven_block(value);
        *(void **)address = value;
        subzone->write_barrier().mark_card(address);
        return true;
    } else if (thread.is_stack_address(address)) {
        // Stack stores need no barrier; the stack is scanned conservatively.
        *(void **)address = value;
        return true;
    } else if (void *block = block_start_large(address)) {
        Large *large = Large::large(block);
        EnliveningHelper<UnconditionalBarrier> barrier(thread);
        if (barrier && !block_is_marked(value)) barrier.enliven_block(value);
        *(void **)address = value;
        thread.block_escaped(this, NULL, value);
        // Only scanned large blocks have write-barrier cards.
        if (large->is_scanned()) large->write_barrier().mark_card(address);
        return true;
    }
    // Unknown destination: value escapes, but caller must do the store.
    thread.block_escaped(this, NULL, value);
    return false;
}
bool Zone::set_write_barrier_range(void *destination, const usword_t size) {
if (in_subzone_memory(destination)) {
Subzone *subzone = Subzone::subzone(destination);
subzone->write_barrier().mark_cards(destination, size);
return true;
} else if (void *block = block_start_large(destination)) {
Large *large = Large::large(block);
if (large->is_scanned()) large->write_barrier().mark_cards(destination, size);
return true;
}
return false;
}
// Dirty the single write-barrier card covering `address`. Returns false
// when the address is not in GC memory.
bool Zone::set_write_barrier(void *address) {
    if (in_subzone_memory(address)) {
        Subzone *subzone = Subzone::subzone(address);
        subzone->write_barrier().mark_card(address);
        return true;
    }
    else if (void *block = block_start_large(address)) {
        Large *large = Large::large(block);
        // Only scanned large blocks carry write-barrier cards.
        if (large->is_scanned()) large->write_barrier().mark_card(address);
        return true;
    }
    return false;
}
// Visitor: downgrade every marked write-barrier card to "untouched",
// counting how many cards were affected.
struct mark_write_barriers_untouched_visitor {
    usword_t _count;    // number of cards transitioned
    mark_write_barriers_untouched_visitor() : _count(0) {}
    inline bool visit(Zone *zone, WriteBarrier &wb) {
        _count += wb.mark_cards_untouched();
        return true;    // continue iteration
    }
};
// First half of write-barrier repair: after a full collection, mark all
// cards untouched so a later pass can clear the ones never re-dirtied.
void Zone::mark_write_barriers_untouched() {
    mark_write_barriers_untouched_visitor visitor;
    visitWriteBarriers(this, visitor);
}
// Visitor: clear cards still in the "untouched" state, counting them.
struct clear_untouched_write_barriers_visitor {
    usword_t _count;    // number of cards cleared
    clear_untouched_write_barriers_visitor() : _count(0) {}
    inline bool visit(Zone *zone, WriteBarrier &wb) {
        _count += wb.clear_untouched_cards();
        return true;    // continue iteration
    }
};
// Second half of write-barrier repair: clear every card that stayed
// untouched since mark_write_barriers_untouched().
void Zone::clear_untouched_write_barriers() {
    clear_untouched_write_barriers_visitor visitor;
    visitWriteBarriers(this, visitor);
}
// Visitor: unconditionally clear every write barrier.
struct clear_all_write_barriers_visitor {
    inline bool visit(Zone *zone, WriteBarrier &wb) {
        wb.clear();
        return true;    // continue iteration
    }
};
// Clear every write-barrier card in the zone.
void Zone::clear_all_write_barriers() {
    clear_all_write_barriers_visitor visitor;
    visitWriteBarriers(this, visitor);
}
// Visitor: clear the mark bit of every allocated block (subzone quantum
// or large). Currently unused — see the #else branch of reset_all_marks.
struct reset_all_marks_visitor {
    inline bool visit(Zone *zone, Subzone *subzone, usword_t q, void *block) {
        subzone->clear_mark(q);
        return true;
    }
    inline bool visit(Zone *zone, Large *large, void *block) {
        large->clear_mark();
        return true;
    }
};
// Clear all mark bits after a collection. The active (#if 1) path clears
// whole regions in bulk rather than iterating block-by-block.
void Zone::reset_all_marks() {
#if 1
    for (Region *region = _region_list; region != NULL; region = region->next()) {
        region->clear_all_marks();
    }
    // Large blocks are cleared individually under _large_lock.
    SpinLock lock(&_large_lock);
    for (Large *large = _large_list; large != NULL; large = large->next()) {
        large->clear_mark();
    }
#else
    // Alternative per-block implementation, kept for reference.
    reset_all_marks_visitor visitor;
    BlockIterator<reset_all_marks_visitor> iterator(this, visitor);
    iterator.visit();
#endif
}
// Visitor: clear both mark and pending bits of every allocated block.
// NOTE(review): appears unused by reset_all_marks_and_pending below,
// which clears in bulk instead — confirm before removing.
struct reset_all_marks_and_pending_visitor {
    inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
        subzone->clear_mark(q);
        subzone->clear_pending(q);
        return true;
    }
    inline bool visit(Zone *zone, Large *large) {
        large->clear_mark();
        large->clear_pending();
        return true;
    }
};
// Clear all mark AND pending bits — used to restart a collection from
// scratch (e.g. after a scan-stack overflow, see Zone::collect).
void Zone::reset_all_marks_and_pending() {
    for (Region *region = _region_list; region != NULL; region = region->next()) {
        region->clear_all_marks();
        region->clear_all_pending();
    }
    SpinLock lock(&_large_lock);
    for (Large *large = _large_list; large != NULL; large = large->next()) {
        large->clear_mark();
        large->clear_pending();
    }
}
// Visitor: accumulate per-block statistics, charging region and subzone
// admin overhead exactly once per region/subzone encountered (tracked via
// _last_region/_last_subzone, which relies on iteration order grouping
// blocks by subzone).
struct statistics_visitor {
    Statistics &_stats;
    Region *_last_region;       // last region charged for admin overhead
    Subzone *_last_subzone;     // last subzone charged for admin overhead
    statistics_visitor(Statistics &stats)
    : _stats(stats)
    , _last_region(NULL)
    , _last_subzone(NULL)
    {}
    inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
        if (_last_region != subzone->region()) {
            _last_region = subzone->region();
            _stats.add_admin(Region::bytes_needed());
        }
        if (_last_subzone != subzone) {
            _last_subzone = subzone;
            _stats.add_admin(subzone_write_barrier_max);
            _stats.add_allocated(subzone->allocation_size());
            _stats.add_dirty(subzone->allocation_size());
        }
        _stats.add_count(1);
        _stats.add_size(subzone->size(q));
        return true;
    }
    inline bool visit(Zone *zone, Large *large) {
        // Admin overhead of a large block is the VM slop beyond its size.
        _stats.add_admin(large->vm_size() - large->size());
        _stats.add_count(1);
        _stats.add_size(large->size());
        return true;
    }
};
// Accumulate detailed zone statistics by visiting every allocated block.
void Zone::statistics(Statistics &stats) {
    statistics_visitor visitor(stats);
    visitAllocatedBlocks(this, visitor);
}
// Fill a malloc_statistics_t for malloc-zone introspection: sum the large
// list under _large_lock, then let each region add its own figures under
// _region_lock.
void Zone::malloc_statistics(malloc_statistics_t *stats) {
    stats->blocks_in_use = 0;
    stats->size_in_use = 0;
    stats->max_size_in_use = 0;
    stats->size_allocated = 0;
    {
        SpinLock lock(&_large_lock);
        Large *l = _large_list;
        while (l) {
            stats->blocks_in_use++;
            stats->size_in_use += l->vm_size();
            stats->max_size_in_use += l->vm_size();
            stats->size_allocated += l->vm_size();
            l = l->next();
        }
    }
    {
        SpinLock lock(&_region_lock);
        Region *r = _region_list;
        while (r) {
            r->malloc_statistics(stats);
            r = r->next();
        }
    }
}
// Open the enlivening window: flip every registered thread's
// needs_enlivening flag to true so allocations/retains during scanning
// get queued for the collector. close_locks()/open_locks() bracket the
// flag flips; each per-thread spin lock is held only for its own flip.
void Zone::set_needs_enlivening() {
    close_locks();
    Mutex lock(&_registered_threads_mutex);
    _enlivening_enabled = true;
    for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
        LockedBoolean &needs_enlivening = thread->needs_enlivening();
        assert(needs_enlivening.state == false);
        SpinLock lock(&needs_enlivening.lock);
        needs_enlivening.state = true;
    }
    open_locks();
}
// Visitor: re-pend every pointer in an enlivening-queue chunk so the
// scanner revisits blocks mutated during the scan.
class scan_barrier_visitor {
    MemoryScanner &_scanner;
public:
    scan_barrier_visitor(MemoryScanner &scanner) : _scanner(scanner) {}
    void visitPointerChunk(void **pointers, void **limit) {
        while (pointers < limit) {
            _scanner.repend(*pointers++);
        }
    }
};
// End-of-scan barrier: drain every thread's enlivening queue into the
// scanner. DELIBERATE LOCK HANDOFF: _registered_threads_mutex and each
// thread's needs_enlivening.lock are acquired here and NOT released —
// clear_needs_enlivening() releases them after scanning completes, so no
// thread can enliven between the drain and the end of collection.
void Zone::enlivening_barrier(MemoryScanner &scanner) {
    scan_barrier_visitor visitor(scanner);
    pthread_mutex_lock(&_registered_threads_mutex);
    for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
        EnliveningQueue &enlivening_queue = thread->enlivening_queue();
        LockedBoolean &needs_enlivening = enlivening_queue.needs_enlivening();
        spin_lock(&needs_enlivening.lock);
        visitPointerChunks(enlivening_queue.chunks(), enlivening_queue.count(), visitor);
        enlivening_queue.reset();
    }
    _enlivening_complete = true;
}
// Close the enlivening window. Releases the per-thread spin locks and the
// _registered_threads_mutex that enlivening_barrier() left held (see the
// lock-handoff note there).
void Zone::clear_needs_enlivening() {
    _enlivening_enabled = false;
    _enlivening_complete = false;
    for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
        LockedBoolean &needs_enlivening = thread->needs_enlivening();
        assert(needs_enlivening.state && (!__is_threaded || needs_enlivening.lock != 0));
        needs_enlivening.state = false;
        spin_unlock(&needs_enlivening.lock);
    }
    pthread_mutex_unlock(&_registered_threads_mutex);
}
// Try to exclude the collector by grabbing both collector mutexes without
// blocking. Returns false (having released anything acquired) if either
// is busy; on success the caller must later call unblock_collector().
bool Zone::block_collector() {
    if (pthread_mutex_trylock(&_mark_bits_mutex) != 0)
        return false;
    if (pthread_mutex_trylock(&_registered_threads_mutex) != 0) {
        pthread_mutex_unlock(&_mark_bits_mutex);
        return false;
    }
    return true;
}
// Release the mutexes taken by a successful block_collector(), in
// reverse acquisition order.
void Zone::unblock_collector() {
    pthread_mutex_unlock(&_registered_threads_mutex);
    pthread_mutex_unlock(&_mark_bits_mutex);
}
// Run one collection (generational when `is_partial`). Phases: open the
// enlivening window, scan, identify garbage (scavenge_blocks), clear weak
// references, maintain write-barrier repair state, close the window,
// reset marks, then run weak-reference callbacks outside all locks.
void Zone::collect(bool is_partial, void *current_stack_bottom, auto_date_t *enliveningBegin) {
    GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)this, AUTO_TRACE_SCANNING_PHASE);
    set_needs_enlivening();
    Collector collector(this, current_stack_bottom, is_partial);
    pthread_mutex_lock(&_mark_bits_mutex);
    collector.collect(false);
    if (_scan_stack.is_overflow()) {
        // Scan stack overflowed: wipe all mark/pending state and redo the
        // scan in the slower overflow-tolerant mode.
        _stats.increment_stack_overflow_count();
        clear_needs_enlivening();
        reset_all_marks_and_pending();
        set_needs_enlivening();
        collector.collect(true);
    }
    _scan_stack.reset();
    GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)this, AUTO_TRACE_SCANNING_PHASE, (uint64_t)collector.blocks_scanned(), (uint64_t)collector.bytes_scanned());
    auto_weak_callback_block_t *callbacks = NULL;
    *enliveningBegin = collector.scan_end;
    _stats.increment_gc_count(is_partial);
    // Rebuild the garbage list from unmarked blocks.
    _garbage_list.clear_count();
    scavenge_blocks();
    if (has_weak_references()) {
        GARBAGE_COLLECTION_COLLECTION_PHASE_BEGIN((auto_zone_t*)this, AUTO_TRACE_WEAK_REFERENCE_PHASE);
        uintptr_t weak_referents, weak_references;
        callbacks = weak_clear_references(this, _garbage_list.count(), (vm_address_t *)_garbage_list.buffer(), &weak_referents, &weak_references);
        GARBAGE_COLLECTION_COLLECTION_PHASE_END((auto_zone_t*)this, AUTO_TRACE_WEAK_REFERENCE_PHASE, (uint64_t)weak_referents, (uint64_t)(weak_references * sizeof(void*)));
    }
    // Write-barrier repair: a full collection arms repair mode by marking
    // cards untouched; a subsequent partial collection clears the cards
    // that were never re-dirtied.
    if (!is_partial) {
        if (!_repair_write_barrier) {
            _repair_write_barrier = true;
            mark_write_barriers_untouched();
        }
    } else if (_repair_write_barrier) {
        clear_untouched_write_barriers();
        _repair_write_barrier = false;
    }
    // Releases the locks left held by enlivening_barrier().
    clear_needs_enlivening();
    reset_all_marks();
    pthread_mutex_unlock(&_mark_bits_mutex);
    recycle_threads();
    // Weak callbacks run with no zone locks held.
    weak_call_callbacks(callbacks);
    if (Environment::print_stats) {
        malloc_printf("cnt=%d, sz=%d, max=%d, al=%d, admin=%d\n",
            _stats.count(),
            _stats.size(),
            _stats.dirty_size(),
            _stats.allocated(),
            _stats.admin_size());
    }
}
// Visitor: after scanning, classify each block. Thread-local blocks are
// skipped (their own collector handles them); new blocks mature; unmarked
// blocks are flagged as garbage and appended to the garbage list.
struct scavenge_blocks_visitor {
    PointerList& _list;     // receives addresses of garbage blocks
    scavenge_blocks_visitor(PointerList& list) : _list(list) {}
    inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
        if (subzone->is_thread_local(q)) return true;
        if (subzone->is_new(q)) subzone->mature(q);
        if (!subzone->is_marked(q)) {
            subzone->mark_global_garbage(q);
            _list.add(subzone->quantum_address(q));
        }
        return true;
    }
    inline bool visit(Zone *zone, Large *large) {
        if (large->is_new()) large->mature();
        if (!large->is_marked()) {
            large->mark_garbage();
            _list.add(large->address());
        }
        return true;
    }
};
// Sweep phase: visit every allocated block, maturing survivors and
// collecting unmarked blocks into _garbage_list.
void Zone::scavenge_blocks() {
    scavenge_blocks_visitor visitor(_garbage_list);
    visitAllocatedBlocks(this, visitor);
}
// Reap Thread objects for threads that have exited: unlink them under the
// registered-threads mutex, then delete them outside the lock.
void Zone::recycle_threads() {
    Thread *unbound_threads = NULL;
    {
        Mutex lock(&_registered_threads_mutex);
        Thread::scavenge_threads(&_registered_threads, &unbound_threads);
    }
    while (unbound_threads != NULL) {
        Thread *next = unbound_threads->next();
        delete unbound_threads;
        unbound_threads = next;
    }
}
// Drive the client's per-block finalization callback across the cursor's
// garbage list. Only object blocks (AUTO_OBJECT layout) are passed to
// `op`; block/byte counters are tallied on the cursor as it goes.
static void foreach_block_do(auto_zone_cursor_t cursor, void (*op) (void *ptr, void *data), void *data) {
    Zone *azone = (Auto::Zone *)cursor->zone;
    while (cursor->index < cursor->garbage_count) {
        void *ptr = (void *)cursor->garbage[cursor->index++];
        auto_memory_type_t type = auto_zone_get_layout_type((auto_zone_t *)azone, ptr);
        if (type & AUTO_OBJECT) {
#if DEBUG
            // Debug hook: announce when the watched address is finalized.
            if (ptr == WatchPoint) {
                malloc_printf("auto_zone invalidating watchpoint: %p\n", WatchPoint);
            }
#endif
            op(ptr, data);
            cursor->block_count++;
            cursor->byte_count += azone->block_size(ptr);
        }
    }
}
// Finalization phase: hand the garbage list to the client's
// batch_invalidate callback (which calls -finalize or equivalent). In
// DEBUG builds, first report any garbage block whose refcount is
// unexpectedly non-zero.
void Zone::invalidate_garbage(const size_t garbage_count, const vm_address_t *garbage) {
#if DEBUG
    // Sanity check: garbage should never be retained at this point.
    for (size_t index = 0; index < garbage_count; index++) {
        void *ptr = (void *)garbage[index];
        int rc = block_refcount(ptr);
        if (rc > 0)
            malloc_printf("invalidate_garbage: garbage ptr = %p, has non-zero refcount = %d\n", ptr, rc);
    }
#endif
    struct auto_zone_cursor cursor = { (auto_zone_t *)this, garbage_count, garbage, 0, 0, 0 };
    if (control.batch_invalidate) {
        control.batch_invalidate((auto_zone_t *)this, foreach_block_do, &cursor, sizeof(cursor));
    }
}
// Turns a resurrected garbage block into a zombie: clears any weak
// references to it (objects only), notifies the client's resurrect
// callback, and demotes the block's layout to an unscanned object.
static inline void zombify(Auto::Zone *azone, void *ptr) {
    const auto_memory_type_t layout = azone->block_layout(ptr);
    if (layout & AUTO_OBJECT) azone->erase_weak(ptr);
    if (azone->control.resurrect) azone->control.resurrect((auto_zone_t*)azone, ptr);
    azone->block_set_layout(ptr, AUTO_OBJECT_UNSCANNED);
}
// Reports a garbage block that acquired a positive refcount during
// finalization (a "resurrection" error). If the environment marks
// resurrection as fatal, aborts via auto_fatal(); otherwise just logs and
// calls auto_zone_resurrection_error() so a debugger can break there.
void Zone::handle_overretained_garbage(void *block, int rc) {
    auto_memory_type_t layout = block_layout(block);
    const bool is_object = ((layout & AUTO_OBJECT) == AUTO_OBJECT);
    // name_for_address returns a malloc'd string we must free; the static
    // fallback labels must not be freed.
    const bool owns_name = is_object && control.name_for_address;
    char *name;
    if (owns_name) {
        name = control.name_for_address((auto_zone_t *)this, (vm_address_t)block, 0);
    } else {
        name = (char *)(is_object ? "object" : "non-object");
    }
    malloc_printf("garbage block %p(%s) was over-retained during finalization, refcount = %d\n"
                  "This could be an unbalanced CFRetain(), or CFRetain() balanced with -release.\n"
                  "Break on auto_zone_resurrection_error() to debug.\n", block, name, rc);
    auto_zone_resurrection_error();
    if (Auto::Environment::resurrection_is_fatal) {
        auto_fatal("fatal resurrection error for garbage block %p(%s): over-retained during finalization, refcount = %d", block, name, rc);
    }
    if (owns_name) free(name);
}
// Final sweep: deallocates every garbage block with a zero refcount,
// zombifies blocks flagged as zombies, and reports any other block that was
// over-retained during finalization. Returns the total bytes freed and
// reports blocks/bytes freed through the out-parameters.
// NOTE: the `generational` parameter is not used in this body — presumably
// kept for interface symmetry with the collector entry points; confirm
// against callers.
size_t Zone::free_garbage(boolean_t generational, const size_t garbage_count, vm_address_t *garbage, size_t &blocks_freed, size_t &bytes_freed) {
size_t index;
// Hold the associations lock for the entire sweep so that clearing a
// block's associative references and deallocating it happen atomically
// with respect to other threads touching the associations table.
SpinLock lock(associations_lock());
blocks_freed = bytes_freed = 0;
for (index = 0; index < garbage_count; index++) {
void *ptr = (void *)garbage[index];
int rc = block_refcount(ptr);
if (rc == 0) {
// Unreferenced garbage: clear weak references (objects only),
// account for it, then release its memory.
if ((block_layout(ptr) & AUTO_OBJECT)) erase_weak(ptr);
blocks_freed++;
bytes_freed += block_size(ptr);
// Tell the malloc stack-logging facility about the deallocation.
// NOTE(review): `zone` here appears to be a member/global distinct
// from `this` — confirm it names the underlying malloc zone.
if (malloc_logger) malloc_logger(MALLOC_LOG_TYPE_DEALLOCATE | MALLOC_LOG_TYPE_HAS_ZONE, uintptr_t(zone), uintptr_t(ptr), 0, 0, 0);
erase_associations_internal(ptr);
if (in_subzone_memory(ptr)) {
Subzone::subzone(ptr)->admin()->deallocate(ptr);
} else if (in_large_memory(ptr)) {
deallocate_large(ptr);
} else {
error("Deallocating a non-block", ptr);
}
} else if (is_zombie(ptr)) {
// Block was resurrected but is marked as a zombie: neuter it and
// drop the refcount it picked up during finalization.
zombify(this, ptr);
block_decrement_refcount(ptr);
} else {
// Positive refcount and not a zombie: resurrection error.
handle_overretained_garbage(ptr, rc);
}
}
return bytes_freed;
}
// Returns the calling thread's Thread object, creating and registering a
// new one the first time this pthread touches the collector. Always
// (re)installs the pointer in the thread-specific slot so the TSD
// destructor will run at thread exit.
Thread &Zone::register_thread() {
    Thread *self_thread = current_thread();
    if (self_thread == NULL) {
        // First contact: allocate outside the lock, then splice onto the
        // head of the registered-threads list under the registration mutex.
        self_thread = new Thread(this);
        Mutex lock(&_registered_threads_mutex);
        self_thread->set_next(_registered_threads);
        // New threads inherit the zone's current enlivening state.
        self_thread->needs_enlivening().state = _enlivening_enabled;
        _registered_threads = self_thread;
    }
    pthread_setspecific(_registered_threads_key, self_thread);
    return *self_thread;
}
// Deliberately a no-op: thread teardown is driven by the pthread TSD
// destructor (destroy_registered_thread), not by explicit unregistration.
void Zone::unregister_thread() {
}
// pthread TSD destructor for _registered_threads_key. pthreads invokes key
// destructors repeatedly (up to PTHREAD_DESTRUCTOR_ITERATIONS passes) while
// any key still holds a non-NULL value. To ensure the collector's Thread is
// torn down after all other TSD destructors, this reinstalls the value on
// every pass and only unbinds the Thread once the iteration count says this
// is the final pass; storing INVALID_THREAD_KEY_VALUE then stops further
// callbacks for this key.
void Zone::destroy_registered_thread(void *key_value) {
if (key_value != INVALID_THREAD_KEY_VALUE) {
Thread *thread = (Thread *)key_value;
Zone *zone = thread->zone();
pthread_key_t thread_key = zone->_registered_threads_key;
if (thread->increment_tsd_count() == PTHREAD_DESTRUCTOR_ITERATIONS) {
// Last destructor pass: detach the Thread from this pthread.
thread->unbind();
key_value = INVALID_THREAD_KEY_VALUE;
}
// Either re-arm the destructor for another pass, or mark the slot dead.
pthread_setspecific(thread_key, key_value);
}
}
// Returns the first registered thread that can be locked for scanning, or
// NULL when none qualifies. The registration mutex is held only while
// walking the list; the per-thread scanning lock is left held on the
// returned thread.
inline Thread *Zone::firstScannableThread() {
    Mutex lock(&_registered_threads_mutex);
    Thread *candidate = _registered_threads;
    while (candidate != NULL && !candidate->lockForScanning())
        candidate = candidate->next();
    return candidate;
}
// Releases the scanning lock held on `thread` and returns the next
// registered thread that can be locked for scanning, or NULL when the list
// is exhausted.
inline Thread *Zone::nextScannableThread(Thread *thread) {
    Mutex lock(&_registered_threads_mutex);
    thread->unlockForScanning();
    Thread *candidate = thread->next();
    while (candidate != NULL && !candidate->lockForScanning())
        candidate = candidate->next();
    return candidate;
}
// Applies `scanner` to every registered thread that can be locked for
// scanning; first/nextScannableThread acquire and release the per-thread
// scanning locks as the iteration advances.
void Zone::scan_registered_threads(thread_scanner_t scanner) {
    Thread *thread = firstScannableThread();
    while (thread != NULL) {
        scanner(thread);
        thread = nextScannableThread(thread);
    }
}
// Stops the world: suspends every registered thread.
// NOTE: deliberately returns with _registered_threads_mutex still LOCKED —
// the matching unlock is in resume_all_registered_threads() — so the thread
// list cannot change while the world is stopped. Callers must pair every
// call with resume_all_registered_threads().
void Zone::suspend_all_registered_threads() {
pthread_mutex_lock(&_registered_threads_mutex);
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
thread->suspend();
}
}
// Restarts the world: resumes every registered thread, then releases
// _registered_threads_mutex, which was acquired (and intentionally left
// held) by suspend_all_registered_threads(). Must only be called after a
// matching suspend_all_registered_threads().
void Zone::resume_all_registered_threads() {
for (Thread *thread = _registered_threads; thread != NULL; thread = thread->next()) {
thread->resume();
}
pthread_mutex_unlock(&_registered_threads_mutex);
}
struct print_all_blocks_visitor {
Region *_last_region; Subzone *_last_subzone; bool _is_large;
print_all_blocks_visitor() : _last_region(NULL), _is_large(false) {}
inline bool visit(Zone *zone, Subzone *subzone, usword_t q) {
if (_last_region != subzone->region()) {
_last_region = subzone->region();
malloc_printf("Region [%p..%p]\n", _last_region->address(), _last_region->end());
}
void *block = subzone->quantum_address(q);
if (subzone->is_start(q)) {
zone->print_block(block);
} else {
FreeListNode *node = (FreeListNode *)block;
malloc_printf(" %p(%6d) ### free\n", block, node->size());
}
return true;
}
inline bool visit(Zone *zone, Large *large) {
if (!_is_large) {
malloc_printf("Large Blocks\n");
_is_large = true;
}
zone->print_block(large->address());
return true;
}
};
void Zone::print_all_blocks() {
SpinLock lock(&_region_lock);
print_all_blocks_visitor visitor;
visitAllBlocks(this, visitor);
}
// Convenience overload: prints the block with an empty tag prefix.
void Zone::print_block(void *block) {
print_block(block, "");
}
// Debugging aid: prints a one-line description of the block containing
// `block` (refcount, layout flags, age/mark state, class name for objects),
// prefixed by `tag`. Interior pointers are normalized via block_start().
// Fix: previously a NULL result from block_start() printed the
// "is not a block" message twice (once before the main branch and again at
// the end of the function); it now prints exactly once.
void Zone::print_block(void *block, const char *tag) {
    block = block_start(block);
    if (block) {
        if (in_subzone_memory(block)) {
            Subzone *subzone = Subzone::subzone(block);
            usword_t q = subzone->quantum_index(block);
            int rc = block_refcount(block);
            int layout = subzone->layout(q);
            bool is_unscanned = (layout & AUTO_UNSCANNED) != 0;
            bool is_object = (layout & AUTO_OBJECT) != 0;
            bool is_new = subzone->is_new(q);
            bool is_marked = subzone->is_marked(q);
            bool is_pending = false;
            char *class_name = (char *)"";
            if (is_object) {
                // NOTE(review): reads the class name pointer at isa+8;
                // assumes the runtime's class layout — confirm for this ABI.
                void *isa = *(void **)block;
                if (isa) class_name = *(char **)displace(isa, 8);
            }
            malloc_printf("%s%p(%6d) %s %s %s %s %s rc(%d) q(%u) subzone(%p) %s\n",
                          tag, block, (unsigned)subzone->size(q),
                          is_unscanned ? " " : "scn",
                          is_object ? "obj" : "mem",
                          is_new ? "new" : " ",
                          is_marked ? "mark" : " ",
                          is_pending ? "pend" : " ",
                          rc,
                          q, subzone,
                          class_name);
        } else if (in_large_memory(block) && Large::is_start(block)) {
            Large *large = Large::large(block);
            int rc = block_refcount(block);
            int layout = large->layout();
            bool is_unscanned = (layout & AUTO_UNSCANNED) != 0;
            bool is_object = (layout & AUTO_OBJECT) != 0;
            bool is_new = large->is_new();
            bool is_marked = large->is_marked();
            bool is_pending = false;
            char *class_name = (char *)"";
            if (is_object) {
                void *isa = *(void **)block;
                if (isa) class_name = *(char **)displace(isa, 8);
            }
            malloc_printf("%s%p(%6d) %s %s %s %s %s rc(%d) %s\n",
                          tag, block, (unsigned)large->size(),
                          is_unscanned ? " " : "scn",
                          is_object ? "obj" : "mem",
                          is_new ? "new" : " ",
                          is_marked ? "mark" : " ",
                          is_pending ? "pend" : " ",
                          rc,
                          class_name);
        }
        // A recognized start address that is neither subzone nor large
        // prints nothing (matches previous behavior).
        return;
    }
    malloc_printf("%s%p is not a block", tag, block);
}
};