#ifndef LLVM_ABI_H
#define LLVM_ABI_H
#include "llvm-internal.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Attributes.h"
#include "llvm/Target/TargetData.h"
namespace llvm {
class BasicBlock;
}
extern "C" {
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
}
// DefaultABIClient - No-op implementation of the callback interface used by
// the DefaultABI template (defined later in this file).  DefaultABI walks a
// function's return and argument types and reports each lowering decision
// through these methods; concrete clients provide the same signatures and
// override only the callbacks they care about.
struct DefaultABIClient {
// Return true if the function's result is returned through a hidden
// pointer argument ("shadow"/sret return) rather than directly.
bool isShadowReturn() { return false; }
// The function returns the scalar LLVM type RetTy directly.
void HandleScalarResult(const Type *RetTy) {}
// An aggregate result is returned as the scalar ScalarTy.  Offset is the
// byte offset supplied by LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN (0 with the
// default implementation in this file).
void HandleAggregateResultAsScalar(const Type *ScalarTy, unsigned Offset=0) {}
// An aggregate result is returned by value as the LLVM aggregate AggrTy.
void HandleAggregateResultAsAggregate(const Type *AggrTy) {}
// An aggregate result is returned in memory through a hidden pointer
// argument of type PtrArgTy.  (RetPtr's meaning is client-defined;
// DefaultABI always passes false here.)
void HandleAggregateShadowResult(const PointerType *PtrArgTy, bool RetPtr){}
// A scalar (e.g. vector) result is returned through a hidden pointer
// argument of type PtrArgTy.  (DefaultABI always passes RetPtr=false.)
void HandleScalarShadowResult(const PointerType *PtrArgTy, bool RetPtr) {}
// An argument is passed as the scalar LLVMTy.  RealSize, when nonzero, is
// the number of bytes of the scalar that carry actual data (used for a
// partially-filled trailing register; see PassInMixedRegisters).
void HandleScalarArgument(const llvm::Type *LLVMTy, tree type,
unsigned RealSize = 0) {}
// An argument is passed by a hidden reference: the caller passes a
// pointer (of type PtrTy) to the value.
void HandleByInvisibleReferenceArgument(const llvm::Type *PtrTy, tree type) {}
// An aggregate argument is passed in memory with the byval attribute.
void HandleByValArgument(const llvm::Type *LLVMTy, tree type) {}
// An aggregate argument is passed as a first-class aggregate value.
void HandleFCAArgument(const llvm::Type *LLVMTy, tree type) {}
// EnterField/ExitField bracket the recursive decomposition of an
// aggregate argument: EnterField announces descent into field FieldNo of
// StructTy, ExitField ends the innermost descent.
void EnterField(unsigned FieldNo, const llvm::Type *StructTy) {}
void ExitField() {}
};
// isAggregateTreeType - Return true if the given GCC type is treated as an
// aggregate for ABI purposes: records, arrays, unions (plain or qualified)
// and complex numbers.
static bool isAggregateTreeType(tree type) {
  switch (TREE_CODE(type)) {
  case RECORD_TYPE:
  case ARRAY_TYPE:
  case UNION_TYPE:
  case QUAL_UNION_TYPE:
  case COMPLEX_TYPE:
    return true;
  default:
    return false;
  }
}
#ifndef LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY
#define LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(X) \
false
#endif
// doNotUseShadowReturn - Decide whether a value of the given type, returned
// from 'fndecl', can be returned directly (in registers) rather than through
// a hidden sret pointer argument.
static bool doNotUseShadowReturn(tree type, tree fndecl) {
  tree size = TYPE_SIZE(type);
  // Incomplete or variable-sized types always go through the shadow return.
  if (!size || TREE_CODE(size) != INTEGER_CST)
    return false;
  // Some targets never return complex values in memory.
  if (LLVM_SHOULD_NOT_RETURN_COMPLEX_IN_MEMORY(type))
    return true;
  // Otherwise defer to GCC: if it says the value must live in memory, use
  // the shadow return; if not, return it directly.
  return !aggregate_value_p(type, fndecl);
}
// isSingleElementStructOrArray - If 'type' is (recursively) a record with a
// single counted field, or a one-element array, return the GCC type of the
// innermost wrapped element; a non-aggregate type is returned as itself.
// Returns 0 (null tree) otherwise.  If ignoreZeroLength is set, record
// fields of constant zero size do not count toward the single field.  If
// rejectFatBitfield is set, fail when the first field's declared type is
// wider than the record itself (a "fat" bitfield); the flag is dropped on
// recursion.
static tree isSingleElementStructOrArray(tree type, bool ignoreZeroLength,
bool rejectFatBitfield) {
// Scalars terminate the recursion: the element found is the type itself.
if (!isAggregateTreeType(type)) return type;
tree FoundField = 0;
switch (TREE_CODE(type)) {
// Unions and complex numbers never qualify.
case QUAL_UNION_TYPE:
case UNION_TYPE: case COMPLEX_TYPE: default:
return 0;
case RECORD_TYPE:
// Variable-sized records never qualify.
if (TREE_CODE(TYPE_SIZE(type)) != INTEGER_CST)
return 0;
// Scan the fields; exactly one non-skipped FIELD_DECL may exist.
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
if (TREE_CODE(Field) == FIELD_DECL) {
if (ignoreZeroLength) {
// Skip fields whose size is the constant zero.
if (DECL_SIZE(Field) &&
TREE_CODE(DECL_SIZE(Field)) == INTEGER_CST &&
TREE_INT_CST_LOW(DECL_SIZE(Field)) == 0)
continue;
}
if (!FoundField) {
// Reject a field whose declared type is larger than the record.
if (rejectFatBitfield &&
TREE_CODE(TYPE_SIZE(type)) == INTEGER_CST &&
TREE_INT_CST_LOW(TYPE_SIZE(getDeclaredType(Field))) >
TREE_INT_CST_LOW(TYPE_SIZE(type)))
return 0;
FoundField = getDeclaredType(Field);
} else {
// A second field disqualifies the record.
return 0; }
}
// Recurse into the single field (rejectFatBitfield is not propagated).
return FoundField ? isSingleElementStructOrArray(FoundField,
ignoreZeroLength, false)
: 0;
case ARRAY_TYPE:
// Only arrays whose LLVM lowering has exactly one element qualify.
const ArrayType *Ty = dyn_cast<ArrayType>(ConvertType(type));
if (!Ty || Ty->getNumElements() != 1)
return 0;
return isSingleElementStructOrArray(TREE_TYPE(type), false, false);
}
}
// isZeroSizedStructOrUnion - Return true for record/union types that occupy
// no storage at all.
static bool isZeroSizedStructOrUnion(tree type) {
  switch (TREE_CODE(type)) {
  case RECORD_TYPE:
  case UNION_TYPE:
  case QUAL_UNION_TYPE:
    return int_size_in_bytes(type) == 0;
  default:
    // Only records and unions can be "zero sized" for ABI purposes.
    return false;
  }
}
// getLLVMScalarTypeForStructReturn - Default implementation of
// LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN: pick an integer type wide enough to
// carry all the bytes of the aggregate 'type' so it can be returned in
// registers.  *Offset is always set to 0.  Returns NULL when the aggregate
// is larger than 32 bytes and cannot be returned as a scalar.
static const Type* getLLVMScalarTypeForStructReturn(tree type, unsigned *Offset) {
  *Offset = 0;
  const unsigned Size = getTargetData().getTypePaddedSize(ConvertType(type));
  // Map the padded byte size onto the smallest covering integer type.
  if (Size == 0)  return Type::VoidTy;
  if (Size == 1)  return Type::Int8Ty;
  if (Size == 2)  return Type::Int16Ty;
  if (Size <= 4)  return Type::Int32Ty;
  if (Size <= 8)  return Type::Int64Ty;
  if (Size <= 16) return IntegerType::get(128);
  if (Size <= 32) return IntegerType::get(256);
  return NULL;
}
// getLLVMAggregateTypeForStructReturn - Default implementation of
// LLVM_AGGR_TYPE_FOR_STRUCT_RETURN: return the LLVM aggregate type used to
// return 'type' by value, or NULL if no such lowering exists.  The default
// never returns an aggregate; targets override the macro to change this.
static const Type* getLLVMAggregateTypeForStructReturn(tree type) {
return NULL;
}
#ifndef LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS
#define LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(TY) \
false
#endif
#ifndef LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR
#define LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(X) \
false
#endif
#ifndef LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR
#define LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(X, TY) \
false
#endif
#ifndef LLVM_SHOULD_PASS_AGGREGATE_AS_FCA
#define LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(X, TY) \
false
#endif
#ifndef LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS
#define LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(T, TY, E) \
false
#endif
#ifndef LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS
#define LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(E, SE, ISR) \
false
#endif
#ifndef LLVM_BYVAL_ALIGNMENT
#define LLVM_BYVAL_ALIGNMENT(T) 0
#endif
#ifndef LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS
#define LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(X, Y, Z) \
!isSingleElementStructOrArray((X), false, true)
#endif
#ifndef LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR
#define LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(X) \
isSingleElementStructOrArray(X, false, false)
#endif
#ifndef LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR
#define LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(X,Y) 0
#endif
#ifndef LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW
#define LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(X,Y) 0
#endif
#ifndef LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN
#define LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(X, Y) \
getLLVMScalarTypeForStructReturn((X), (Y))
#endif
#ifndef LLVM_AGGR_TYPE_FOR_STRUCT_RETURN
#define LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(X) \
getLLVMAggregateTypeForStructReturn(X)
#endif
#ifndef LLVM_EXTRACT_MULTIPLE_RETURN_VALUE
#define LLVM_EXTRACT_MULTIPLE_RETURN_VALUE(Src,Dest,V,B) \
llvm_default_extract_multiple_return_value((Src),(Dest),(V),(B))
#endif
/// llvm_default_extract_multiple_return_value - Default (unimplemented)
/// hook behind LLVM_EXTRACT_MULTIPLE_RETURN_VALUE: copy a multiple-register
/// return value Src into the memory pointed to by Dest.  Targets that
/// return aggregates in multiple registers must supply their own macro; any
/// call reaching this default is a fatal configuration error.
static void llvm_default_extract_multiple_return_value(Value *Src, Value *Dest,
                                                       bool isVolatile,
                                                       LLVMBuilder &Builder) {
  assert(0 && "LLVM_EXTRACT_MULTIPLE_RETURN_VALUE is not implemented!");
  // assert() compiles away under NDEBUG; abort() keeps release builds from
  // silently falling through (matches the file's other unreachable paths).
  abort();
}
// DefaultABI - Template driving the target-independent half of argument and
// return-value lowering.  It walks GCC 'tree' types, consults the
// LLVM_SHOULD_* target-override macros above to decide how each value is
// passed or returned, and reports every decision to the Client (a
// DefaultABIClient-shaped object) through its Handle*/EnterField/ExitField
// callbacks.
template<typename Client>
class DefaultABI {
protected:
// The client receiving the lowering decisions.
Client &C;
public:
DefaultABI(Client &c) : C(c) {}
bool isShadowReturn() const { return C.isShadowReturn(); }
// HandleReturnType - Decide how a function returning 'type' does so and
// invoke the matching client callback.  'fn' is passed through to
// aggregate_value_p; 'isBuiltin' is forwarded to the vector-return hooks.
void HandleReturnType(tree type, tree fn, bool isBuiltin) {
unsigned Offset = 0;
const Type *Ty = ConvertType(type);
if (Ty->getTypeID() == Type::VectorTyID) {
// Vectors: the target may return them as a scalar, via a hidden sret
// pointer, or directly.
tree ScalarType = LLVM_SHOULD_RETURN_VECTOR_AS_SCALAR(type, isBuiltin);
if (ScalarType)
C.HandleAggregateResultAsScalar(ConvertType(ScalarType));
else if (LLVM_SHOULD_RETURN_VECTOR_AS_SHADOW(type, isBuiltin))
C.HandleScalarShadowResult(PointerType::getUnqual(Ty), false);
else
C.HandleScalarResult(Ty);
} else if (Ty->isSingleValueType() || Ty == Type::VoidTy) {
// Ordinary scalars (and void) are returned directly.
C.HandleScalarResult(Ty);
} else if (doNotUseShadowReturn(type, fn)) {
// Aggregate small enough to come back in registers.  If it wraps a
// single element of the same total size, return that element as a
// scalar; otherwise let the target pick a carrier type.
tree SingleElt = LLVM_SHOULD_RETURN_SELT_STRUCT_AS_SCALAR(type);
if (SingleElt && TYPE_SIZE(SingleElt) &&
TREE_CODE(TYPE_SIZE(SingleElt)) == INTEGER_CST &&
TREE_INT_CST_LOW(TYPE_SIZE_UNIT(type)) ==
TREE_INT_CST_LOW(TYPE_SIZE_UNIT(SingleElt))) {
C.HandleAggregateResultAsScalar(ConvertType(SingleElt));
} else {
if (const Type *AggrTy = LLVM_AGGR_TYPE_FOR_STRUCT_RETURN(type))
C.HandleAggregateResultAsAggregate(AggrTy);
else if (const Type* ScalarTy =
LLVM_SCALAR_TYPE_FOR_STRUCT_RETURN(type, &Offset))
C.HandleAggregateResultAsScalar(ScalarTy, Offset);
else {
assert(0 && "Unable to determine how to return this aggregate!");
abort();
}
}
} else {
// Everything else is returned in memory through a hidden pointer.
C.HandleAggregateShadowResult(PointerType::getUnqual(Ty), false);
}
}
// HandleArgument - Decide how a parameter of the given GCC type is passed
// and inform the client.  ScalarElts accumulates the flattened LLVM types
// of everything passed so far; Attributes (if non-null) receives parameter
// attributes such as ByVal and its alignment.  Recursive for aggregates.
void HandleArgument(tree type, std::vector<const Type*> &ScalarElts,
Attributes *Attributes = NULL) {
unsigned Size = 0;
bool DontCheckAlignment = false;
const Type *Ty = ConvertType(type);
std::vector<const Type*> Elts;
if (isPassedByInvisibleReference(type)) { const Type *PtrTy = PointerType::getUnqual(Ty);
// Passed by hidden reference: the caller passes a pointer to a copy.
C.HandleByInvisibleReferenceArgument(PtrTy, type);
ScalarElts.push_back(PtrTy);
} else if (Ty->getTypeID()==Type::VectorTyID) {
// Vectors: integer registers, byval memory, or directly as a scalar.
if (LLVM_SHOULD_PASS_VECTOR_IN_INTEGER_REGS(type)) {
PassInIntegerRegisters(type, Ty, ScalarElts, 0, false);
} else if (LLVM_SHOULD_PASS_VECTOR_USING_BYVAL_ATTR(type)) {
C.HandleByValArgument(Ty, type);
if (Attributes) {
*Attributes |= Attribute::ByVal;
*Attributes |=
Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
}
} else {
C.HandleScalarArgument(Ty, type);
ScalarElts.push_back(Ty);
}
} else if (Ty->isSingleValueType()) {
// Plain scalars are passed directly.
C.HandleScalarArgument(Ty, type);
ScalarElts.push_back(Ty);
} else if (LLVM_SHOULD_PASS_AGGREGATE_AS_FCA(type, Ty)) {
// Passed as a first-class aggregate value.
C.HandleFCAArgument(Ty, type);
} else if (LLVM_SHOULD_PASS_AGGREGATE_IN_MIXED_REGS(type, Ty, Elts)) {
// Target chose a mix of register classes (Elts).  If the aggregate
// would only partially fit, fall back to passing it byval in memory.
if (!LLVM_AGGREGATE_PARTIALLY_PASSED_IN_REGS(Elts, ScalarElts,
C.isShadowReturn()))
PassInMixedRegisters(type, Ty, Elts, ScalarElts);
else {
C.HandleByValArgument(Ty, type);
if (Attributes) {
*Attributes |= Attribute::ByVal;
*Attributes |=
Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
}
}
} else if (LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(type, Ty)) {
// Whole aggregate passed in memory with the byval attribute.
C.HandleByValArgument(Ty, type);
if (Attributes) {
*Attributes |= Attribute::ByVal;
*Attributes |=
Attribute::constructAlignmentFromInt(LLVM_BYVAL_ALIGNMENT(type));
}
} else if (LLVM_SHOULD_PASS_AGGREGATE_IN_INTEGER_REGS(type, &Size,
&DontCheckAlignment)) {
// Aggregate chopped into integer-register-sized pieces.
PassInIntegerRegisters(type, Ty, ScalarElts, Size, DontCheckAlignment);
} else if (isZeroSizedStructOrUnion(type)) {
// Zero-sized aggregates are not passed at all.
;
} else if (TREE_CODE(type) == RECORD_TYPE) {
// Decompose a struct field-by-field, recursing on each FIELD_DECL.
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field))
if (TREE_CODE(Field) == FIELD_DECL) {
const tree Ftype = getDeclaredType(Field);
const Type *FTy = ConvertType(Ftype);
unsigned FNo = GetFieldIndex(Field);
assert(FNo != ~0U && "Case not handled yet!");
if (!LLVM_SHOULD_PASS_AGGREGATE_USING_BYVAL_ATTR(Ftype, FTy)) {
C.EnterField(FNo, Ty);
HandleArgument(getDeclaredType(Field), ScalarElts);
C.ExitField();
}
}
} else if (TREE_CODE(type) == COMPLEX_TYPE) {
// Complex values decompose into real (field 0) and imaginary (field 1).
C.EnterField(0, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
C.ExitField();
C.EnterField(1, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
C.ExitField();
} else if ((TREE_CODE(type) == UNION_TYPE) ||
(TREE_CODE(type) == QUAL_UNION_TYPE)) {
HandleUnion(type, ScalarElts);
} else if (TREE_CODE(type) == ARRAY_TYPE) {
// Arrays decompose element-by-element.
const ArrayType *ATy = cast<ArrayType>(Ty);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
C.EnterField(i, Ty);
HandleArgument(TREE_TYPE(type), ScalarElts);
C.ExitField();
}
} else {
assert(0 && "unknown aggregate type!");
abort();
}
}
// HandleUnion - Pass a union by passing its largest member; a transparent
// union is passed exactly like its first field.
void HandleUnion(tree type, std::vector<const Type*> &ScalarElts) {
if (TYPE_TRANSPARENT_UNION(type)) {
// Find the first FIELD_DECL and pass the union as that field's type.
tree Field = TYPE_FIELDS(type);
assert(Field && "Transparent union must have some elements!");
while (TREE_CODE(Field) != FIELD_DECL) {
Field = TREE_CHAIN(Field);
assert(Field && "Transparent union must have some elements!");
}
HandleArgument(TREE_TYPE(Field), ScalarElts);
} else {
// Find the largest member and pass the union as if it were that member.
unsigned MaxSize = 0;
tree MaxElt = 0;
for (tree Field = TYPE_FIELDS(type); Field; Field = TREE_CHAIN(Field)) {
if (TREE_CODE(Field) == FIELD_DECL) {
// In a QUAL_UNION_TYPE, skip members whose qualifier is known false.
if (TREE_CODE(type) == QUAL_UNION_TYPE &&
integer_zerop(DECL_QUALIFIER(Field)))
continue;
tree SizeTree = TYPE_SIZE(TREE_TYPE(Field));
// Size in bytes, rounded up from bits.
unsigned Size = ((unsigned)TREE_INT_CST_LOW(SizeTree)+7)/8;
if (Size > MaxSize) {
MaxSize = Size;
MaxElt = Field;
}
// A qualifier known true means this member is the active one: stop.
if (TREE_CODE(type) == QUAL_UNION_TYPE &&
integer_onep(DECL_QUALIFIER(Field)))
break;
}
}
if (MaxElt)
HandleArgument(TREE_TYPE(MaxElt), ScalarElts);
}
}
// PassInIntegerRegisters - Pass an aggregate as a sequence of integer
// pieces: an array of i64 (or i32 when the aggregate is under-aligned and
// alignment is checked) for the bulk, then i32/i16/i8 pieces for the 0-7
// byte tail.  origSize, if nonzero, overrides the size computed from the
// GCC type.
void PassInIntegerRegisters(tree type, const Type *Ty,
std::vector<const Type*> &ScalarElts,
unsigned origSize, bool DontCheckAlignment) {
unsigned Size;
if (origSize)
Size = origSize;
else
Size = TREE_INT_CST_LOW(TYPE_SIZE(type))/8;
// Use 64-bit chunks only when the aggregate is at least i64-aligned, or
// the caller told us not to check.
unsigned Align = TYPE_ALIGN(type)/8;
unsigned Int64Align = getTargetData().getABITypeAlignment(Type::Int64Ty);
bool UseInt64 = DontCheckAlignment ? true : (Align >= Int64Align);
std::vector<const Type*> Elts;
unsigned ElementSize = UseInt64 ? 8:4;
unsigned ArraySize = Size / ElementSize;
const Type *ATy = NULL;
const Type *ArrayElementType = NULL;
if (ArraySize) {
// The bulk of the value travels as [ArraySize x i64/i32].
Size = Size % ElementSize;
ArrayElementType = (UseInt64)?Type::Int64Ty:Type::Int32Ty;
ATy = ArrayType::get(ArrayElementType, ArraySize);
Elts.push_back(ATy);
}
// Cover the remaining bytes with progressively smaller integers.
if (Size >= 4) {
Elts.push_back(Type::Int32Ty);
Size -= 4;
}
if (Size >= 2) {
Elts.push_back(Type::Int16Ty);
Size -= 2;
}
if (Size >= 1) {
Elts.push_back(Type::Int8Ty);
Size -= 1;
}
assert(Size == 0 && "Didn't cover value?");
const StructType *STy = StructType::get(Elts, false);
unsigned i = 0;
if (ArraySize) {
// Walk the leading array element-by-element.
C.EnterField(0, STy);
for (unsigned j = 0; j < ArraySize; ++j) {
C.EnterField(j, ATy);
C.HandleScalarArgument(ArrayElementType, 0);
ScalarElts.push_back(ArrayElementType);
C.ExitField();
}
C.ExitField();
++i;
}
// Then the tail pieces.
for (unsigned e = Elts.size(); i != e; ++i) {
C.EnterField(i, STy);
C.HandleScalarArgument(Elts[i], 0);
ScalarElts.push_back(Elts[i]);
C.ExitField();
}
}
// PassInMixedRegisters - Pass an aggregate using the element types the
// target chose (OrigElts).  VoidTy entries mark padding: they occupy a
// word-sized slot in the synthesized layout but are not actually passed.
void PassInMixedRegisters(tree type, const Type *Ty,
std::vector<const Type*> &OrigElts,
std::vector<const Type*> &ScalarElts) {
std::vector<const Type*> Elts(OrigElts);
const Type* wordType = getTargetData().getPointerSize() == 4 ? Type::Int32Ty :
Type::Int64Ty;
// Replace padding markers with a pointer-sized integer for layout.
for (unsigned i=0, e=Elts.size(); i!=e; ++i)
if (OrigElts[i]==Type::VoidTy)
Elts[i] = wordType;
const StructType *STy = StructType::get(Elts, false);
unsigned Size = getTargetData().getTypePaddedSize(STy);
// If the synthesized struct is larger than the actual value and its last
// element is an integer, compute how many bytes of that last element hold
// real data so the client can pass a partially-filled register.
const StructType *InSTy = dyn_cast<StructType>(Ty);
unsigned InSize = 0;
unsigned LastEltSizeDiff = 0;
if (InSTy) {
InSize = getTargetData().getTypePaddedSize(InSTy);
if (InSize < Size) {
unsigned N = STy->getNumElements();
const llvm::Type *LastEltTy = STy->getElementType(N-1);
if (LastEltTy->isInteger())
LastEltSizeDiff =
getTargetData().getTypePaddedSize(LastEltTy) - (Size - InSize);
}
}
// Emit every non-padding element as a scalar argument.
for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
if (OrigElts[i] != Type::VoidTy) {
C.EnterField(i, STy);
unsigned RealSize = 0;
if (LastEltSizeDiff && i == (e - 1))
RealSize = LastEltSizeDiff;
C.HandleScalarArgument(Elts[i], 0, RealSize);
ScalarElts.push_back(Elts[i]);
C.ExitField();
}
}
}
};
#ifndef TheLLVMABI
#define TheLLVMABI DefaultABI
#endif
#endif