#ifndef LinkBuffer_h
#define LinkBuffer_h
#if ENABLE(ASSEMBLER)
#define DUMP_LINK_STATISTICS 0
#define DUMP_CODE 0
#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
#include "JITCompilationEffort.h"
#include "MacroAssembler.h"
#include <wtf/DataLog.h>
#include <wtf/Noncopyable.h>
namespace JSC {
class JSGlobalData;
// LinkBuffer takes the code generated into a MacroAssembler, copies it into
// executable memory (allocated through the JSGlobalData's executable
// allocator), and provides the interface used to finish linking it:
// resolving calls and jumps, patching data pointers, and querying the final
// addresses of labels recorded during assembly.
//
// On ports with ENABLE(BRANCH_COMPACTION), the copy is performed piecewise so
// that jumps can be rewritten with shorter encodings; applyOffset() remaps
// offsets recorded at assembly time to their post-compaction positions.
//
// finalizeCode() must be invoked to make the memory executable and obtain the
// resulting CodeRef; in debug builds the destructor asserts that this
// happened (or that allocation failed under JITCompilationCanFail).
class LinkBuffer {
WTF_MAKE_NONCOPYABLE(LinkBuffer);
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssembler::Label Label;
typedef MacroAssembler::Jump Jump;
typedef MacroAssembler::PatchableJump PatchableJump;
typedef MacroAssembler::JumpList JumpList;
typedef MacroAssembler::Call Call;
typedef MacroAssembler::DataLabelCompact DataLabelCompact;
typedef MacroAssembler::DataLabel32 DataLabel32;
typedef MacroAssembler::DataLabelPtr DataLabelPtr;
#if ENABLE(BRANCH_COMPACTION)
typedef MacroAssembler::LinkRecord LinkRecord;
typedef MacroAssembler::JumpLinkType JumpLinkType;
#endif
public:
// Immediately copies the assembled code into executable memory via
// linkCode(). When effort is JITCompilationCanFail, callers must check
// isValid()/didFailToAllocate() before using the buffer.
LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
: m_size(0)
#if ENABLE(BRANCH_COMPACTION)
, m_initialSize(0)
#endif
, m_code(0)
, m_assembler(masm)
, m_globalData(&globalData)
#ifndef NDEBUG
, m_completed(false)
, m_effort(effort)
#endif
{
linkCode(ownerUID, effort);
}
~LinkBuffer()
{
// Either finalization ran, or allocation failed and the caller opted into
// that possibility with JITCompilationCanFail.
ASSERT(m_completed || (!m_executableMemory && m_effort == JITCompilationCanFail));
}
// True when executable memory could not be allocated (only possible with
// JITCompilationCanFail); the buffer must not be used further in that case.
bool didFailToAllocate() const
{
return !m_executableMemory;
}
bool isValid() const
{
return !didFailToAllocate();
}
// --- Linking ---
// These methods apply the relocations recorded during code generation.
// applyOffset() translates each assembly-time label offset into its
// position in the (possibly compacted) output buffer.
// Links a (non-near-only) call site to the given function.
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
call.m_label = applyOffset(call.m_label);
MacroAssembler::linkCall(code(), call, function);
}
// Points a jump at the given code location.
void link(Jump jump, CodeLocationLabel label)
{
jump.m_label = applyOffset(jump.m_label);
MacroAssembler::linkJump(code(), jump, label);
}
// Links every jump in the list to the same target.
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.size(); ++i)
link(list.m_jumps[i], label);
}
// Writes a raw pointer value at the position of a DataLabelPtr.
void patch(DataLabelPtr label, void* value)
{
AssemblerLabel target = applyOffset(label.m_label);
MacroAssembler::linkPointer(code(), target, value);
}
// Writes a code address at the position of a DataLabelPtr.
void patch(DataLabelPtr label, CodeLocationLabel value)
{
AssemblerLabel target = applyOffset(label.m_label);
MacroAssembler::linkPointer(code(), target, value.executableAddress());
}
// --- Location queries ---
// Each locationOf() overload returns the final (post-link) address of an
// entity recorded during assembly, wrapped in the matching CodeLocation*
// type.
CodeLocationCall locationOf(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)));
}
CodeLocationLabel locationOf(PatchableJump jump)
{
return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(jump.m_jump.m_label)));
}
CodeLocationLabel locationOf(Label label)
{
return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
{
return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabel32 locationOf(DataLabel32 label)
{
return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabelCompact locationOf(DataLabelCompact label)
{
return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
// Byte offset (within the linked code) of the instruction following the
// call — i.e. the call's return address.
unsigned returnAddressOffset(Call call)
{
call.m_label = applyOffset(call.m_label);
return MacroAssembler::getLinkerCallReturnOffset(call);
}
// Post-compaction byte offset of a label within the linked code.
uint32_t offsetOf(Label label)
{
return applyOffset(label.m_label).m_offset;
}
// Completes linking: marks the memory executable, flushes the instruction
// cache, and returns a reference to the finished code. Must be the last
// operation performed on this LinkBuffer.
CodeRef finalizeCode()
{
performFinalization();
return CodeRef(m_executableMemory);
}
// Returns a directly callable code pointer for the given label.
CodePtr trampolineAt(Label label)
{
return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
}
// Debug-only accessors exposing the raw buffer; not for production use.
void* debugAddress()
{
return m_code;
}
size_t debugSize()
{
return m_size;
}
private:
// Remaps an assembly-time offset to its position in the compacted output.
// Without branch compaction offsets are unchanged (identity).
template <typename T> T applyOffset(T src)
{
#if ENABLE(BRANCH_COMPACTION)
src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
#endif
return src;
}
// Deliberately non-const: linking writes into the code buffer.
void* code()
{
return m_code;
}
// Copies the assembled code into executable memory. On allocation failure
// (only possible with JITCompilationCanFail) m_code stays null. With
// branch compaction enabled, jumps are rewritten to shorter encodings
// during the copy and the allocation is shrunk to the final size.
void linkCode(void* ownerUID, JITCompilationEffort effort)
{
ASSERT(!m_code);
#if !ENABLE(BRANCH_COMPACTION)
// Simple path: the assembler copies its buffer verbatim; no offset
// remapping is needed.
m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID, effort);
if (!m_executableMemory)
return;
m_code = m_executableMemory->start();
m_size = m_assembler->m_assembler.codeSize();
ASSERT(m_code);
#else
// Compaction path: allocate for the worst case (the uncompacted size),
// copy region-by-region shrinking jumps as we go, then shrink the
// allocation to the final size.
m_initialSize = m_assembler->m_assembler.codeSize();
m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID, effort);
if (!m_executableMemory)
return;
m_code = (uint8_t*)m_executableMemory->start();
ASSERT(m_code);
ExecutableAllocator::makeWritable(m_code, m_initialSize);
uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
// readPtr walks the unlinked input buffer; writePtr walks the output.
// They diverge by the total bytes saved by compaction so far.
int readPtr = 0;
int writePtr = 0;
Vector<LinkRecord>& jumpsToLink = m_assembler->jumpsToLink();
unsigned jumpCount = jumpsToLink.size();
for (unsigned i = 0; i < jumpCount; ++i) {
int offset = readPtr - writePtr;
ASSERT(!(offset & 1));
// Copy the instructions between the previous jump (or buffer start)
// and this jump, in 16-bit units — hence the evenness assertions.
size_t regionSize = jumpsToLink[i].from() - readPtr;
uint16_t* copySource = reinterpret_cast_ptr<uint16_t*>(inData + readPtr);
uint16_t* copyEnd = reinterpret_cast_ptr<uint16_t*>(inData + readPtr + regionSize);
uint16_t* copyDst = reinterpret_cast_ptr<uint16_t*>(outData + writePtr);
ASSERT(!(regionSize % 2));
ASSERT(!(readPtr % 2));
ASSERT(!(writePtr % 2));
while (copySource != copyEnd)
*copyDst++ = *copySource++;
// Record the input->output delta for this region so labels inside it
// can later be remapped by applyOffset()/executableOffsetFor().
m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
readPtr += regionSize;
writePtr += regionSize;
// Compute the jump's target address in the output buffer. For a
// forward jump (to() >= from()) the target region has not been copied
// yet, so the current running delta is used; for a backward jump the
// recorded offsets give its exact position.
const uint8_t* target;
if (jumpsToLink[i].to() >= jumpsToLink[i].from())
target = outData + jumpsToLink[i].to() - offset; else
target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
// Pick the (possibly shorter) encoding for this jump. If it shrank,
// pull writePtr back by the savings and record the new delta starting
// at the jump's (shifted) source offset.
JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target);
if (m_assembler->canCompact(jumpsToLink[i].type())) {
int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType);
if (delta) {
writePtr -= delta;
m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
}
}
// Re-point the record at the jump's location in the output buffer.
jumpsToLink[i].setFrom(writePtr);
}
// Copy the tail after the last jump and record its offset delta.
memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
// All final positions are now known: emit the actual jump instructions.
for (unsigned i = 0; i < jumpCount; ++i) {
uint8_t* location = outData + jumpsToLink[i].from();
uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
m_assembler->link(jumpsToLink[i], location, target);
}
jumpsToLink.clear();
m_size = writePtr + m_initialSize - readPtr;
// Return the bytes saved by compaction to the allocator.
m_executableMemory->shrink(m_size);
#if DUMP_LINK_STATISTICS
dumpLinkStatistics(m_code, m_initialSize, m_size);
#endif
#if DUMP_CODE
dumpCode(m_code, m_size);
#endif
#endif
}
// Flips the buffer from writable to executable and flushes the icache.
// Called exactly once, from finalizeCode() (debug-asserted via m_completed).
void performFinalization()
{
#ifndef NDEBUG
ASSERT(!m_completed);
ASSERT(isValid());
m_completed = true;
#endif
#if ENABLE(BRANCH_COMPACTION)
// NOTE(review): uses m_initialSize (the pre-shrink size), mirroring the
// makeWritable(m_code, m_initialSize) call in linkCode() — confirm this
// range is still valid after m_executableMemory->shrink(m_size).
ExecutableAllocator::makeExecutable(code(), m_initialSize);
#else
ExecutableAllocator::makeExecutable(code(), m_size);
#endif
MacroAssembler::cacheFlush(code(), m_size);
}
#if DUMP_LINK_STATISTICS
// Logs per-link and cumulative size savings from branch compaction.
// Enabled by the DUMP_LINK_STATISTICS macro at the top of this file.
static void dumpLinkStatistics(void* code, size_t initialSize, size_t finalSize)
{
static unsigned linkCount = 0;
static unsigned totalInitialSize = 0;
static unsigned totalFinalSize = 0;
linkCount++;
totalInitialSize += initialSize;
totalFinalSize += finalSize;
dataLog("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
static_cast<unsigned>(initialSize - finalSize),
100.0 * (initialSize - finalSize) / initialSize);
dataLog("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
}
#endif
#if DUMP_CODE
// Dumps the linked code as an assembler-source listing (.short/.long data
// directives plus a generated symbol), suitable for feeding to an ARM
// assembler/disassembler. Only implemented for the ARM ports.
static void dumpCode(void* code, size_t size)
{
#if CPU(ARM_THUMB2)
static unsigned codeCount = 0;
unsigned short* tcode = static_cast<unsigned short*>(code);
size_t tsize = size / sizeof(short);
char nameBuf[128];
snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
dataLog("\t.syntax unified\n"
"\t.section\t__TEXT,__text,regular,pure_instructions\n"
"\t.globl\t%s\n"
"\t.align 2\n"
"\t.code 16\n"
"\t.thumb_func\t%s\n"
"# %p\n"
"%s:\n", nameBuf, nameBuf, code, nameBuf);
for (unsigned i = 0; i < tsize; i++)
dataLog("\t.short\t0x%x\n", tcode[i]);
#elif CPU(ARM_TRADITIONAL)
static unsigned codeCount = 0;
unsigned int* tcode = static_cast<unsigned int*>(code);
size_t tsize = size / sizeof(unsigned int);
char nameBuf[128];
snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
dataLog("\t.globl\t%s\n"
"\t.align 4\n"
"\t.code 32\n"
"\t.text\n"
"# %p\n"
"%s:\n", nameBuf, code, nameBuf);
for (unsigned i = 0; i < tsize; i++)
dataLog("\t.long\t0x%x\n", tcode[i]);
#endif
}
#endif
RefPtr<ExecutableMemoryHandle> m_executableMemory; // owns the JIT allocation; null on failure
size_t m_size; // final (post-compaction) code size in bytes
#if ENABLE(BRANCH_COMPACTION)
size_t m_initialSize; // uncompacted code size, as reported by the assembler
#endif
void* m_code; // start of the executable copy; null until linkCode() succeeds
MacroAssembler* m_assembler;
JSGlobalData* m_globalData;
#ifndef NDEBUG
bool m_completed; // set by performFinalization(); checked in the destructor
JITCompilationEffort m_effort; // retained for the destructor's assertion
#endif
};
}
#endif // ENABLE(ASSEMBLER)
#endif // LinkBuffer_h