 //===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//

--- 332 unchanged lines hidden ---

 
 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
 /// where the source and destination may have different types.
 ///
 /// This safely handles the case when the src type is larger than the
 /// destination type; the upper bits of the src will be lost.
 static void CreateCoercedStore(llvm::Value *Src,
                                llvm::Value *DstPtr,
+                               bool DstIsVolatile,
                                CodeGenFunction &CGF) {
   const llvm::Type *SrcTy = Src->getType();
   const llvm::Type *DstTy =
     cast<llvm::PointerType>(DstPtr->getType())->getElementType();
 
   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
 
   // If the store is legal, just bitcast the src pointer.
   if (SrcSize <= DstSize) {
     llvm::Value *Casted =
       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
     // FIXME: Use better alignment / avoid requiring aligned store.
-    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
+    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
   } else {
     // Otherwise do coercion through memory. This is stupid, but simple.
 
     // Generally SrcSize is never greater than DstSize, since that would mean
     // we are losing bits. However, it can happen in cases where the structure
     // has additional padding, for example due to a user-specified alignment.
     //
     // FIXME: Assert that we aren't truncating non-padding bits when we have
     // access to that information.
     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
     CGF.Builder.CreateStore(Src, Tmp);
     llvm::Value *Casted =
       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
     // FIXME: Use better alignment / avoid requiring aligned load.
     Load->setAlignment(1);
-    CGF.Builder.CreateStore(Load, DstPtr);
+    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
   }
 }
 
 /***/
 
 bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
   return FI.getReturnInfo().isIndirect();
 }
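
A note on what "coercion" means above: storing a value of one LLVM type into
memory typed as another. A minimal byte-level model of the two branches of
CreateCoercedStore, assuming only the sizes involved (the helper name and the
memcpy framing are illustrative, not clang API):

    #include <cstdint>
    #include <cstring>

    // Illustrative model of CreateCoercedStore (not clang code): store a
    // value of SrcSize bytes into a destination slot of DstSize bytes.
    void CoercedStoreModel(const void *Src, uint64_t SrcSize,
                           void *Dst, uint64_t DstSize) {
      if (SrcSize <= DstSize) {
        // The whole source fits: the IR merely bitcasts the destination
        // pointer and stores the source value directly.
        std::memcpy(Dst, Src, SrcSize);
      } else {
        // The source is larger (e.g. an ABI-coerced i64 landing in a
        // 6-byte struct): only the destination's leading bytes survive.
        // The IR bounces through a temporary alloca because a store cannot
        // be partial; at the byte level it is simply a truncated copy, and
        // the dropped tail should be padding (hence the FIXME above).
        std::memcpy(Dst, Src, DstSize);
      }
    }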

--- 338 unchanged lines hidden ---

       continue;
 
     case ABIArgInfo::Coerce: {
       assert(AI != Fn->arg_end() && "Argument mismatch!");
       // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
       // result in a new alloca anyway, so we could just store into that
       // directly if we broke the abstraction down more.
       llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
-      CreateCoercedStore(AI, V, *this);
+      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
       // Match to what EmitParmDecl is expecting for this type.
       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
         V = EmitLoadOfScalar(V, false, Ty);
         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
           // This must be a promotion, for something like
           // "void a(x) short x; {..."
           V = EmitScalarConversion(V, Ty, Arg->getType());
         }

--- 60 unchanged lines hidden ---
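
The EmitCall changes below consume a ReturnValueSlot, whose definition is
outside this diff. A sketch of the interface the code relies on (the field
layout is illustrative; only getValue()/isVolatile() and the null-slot
convention are actually assumed by the hunk):

    namespace llvm { class Value; }

    // Sketch, not the real declaration: a slot either names a
    // caller-provided destination for the call result, or is null, in
    // which case EmitCall falls back to a fresh temporary alloca.
    class ReturnValueSlot {
      llvm::Value *Value;   // destination address, or null for "no slot"
      bool IsVolatile;      // whether the destination is a volatile object
    public:
      ReturnValueSlot() : Value(0), IsVolatile(false) {}
      ReturnValueSlot(llvm::Value *V, bool Volatile)
        : Value(V), IsVolatile(Volatile) {}
      llvm::Value *getValue() const { return Value; }
      bool isVolatile() const { return IsVolatile; }
    };

A default-constructed slot preserves the old behavior (always allocate a
temporary); a caller emitting "S s = f();" can instead pass the address of s,
as illustrated after the sret hunk below.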

   if (ArgType->isReferenceType())
     return EmitReferenceBindingToExpr(E, ArgType);
 
   return EmitAnyExprToTemp(E);
 }
 
 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                  llvm::Value *Callee,
                                  ReturnValueSlot ReturnValue,
                                  const CallArgList &CallArgs,
                                  const Decl *TargetDecl) {
   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
   llvm::SmallVector<llvm::Value*, 16> Args;
 
   // Handle struct-return functions by passing a pointer to the
   // location that we would like to return into.
   QualType RetTy = CallInfo.getReturnType();
   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
 
   // If the call returns a temporary with struct return, create a temporary
-  // alloca to hold the result.
-  if (CGM.ReturnTypeUsesSret(CallInfo))
-    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
+  // alloca to hold the result, unless one is given to us.
+  if (CGM.ReturnTypeUsesSret(CallInfo)) {
+    llvm::Value *Value = ReturnValue.getValue();
+    if (!Value)
+      Value = CreateTempAlloca(ConvertTypeForMem(RetTy));
+    Args.push_back(Value);
+  }
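
What reusing a caller-provided sret slot buys, sketched at the source level
(the lowering described in the comments is the intended effect, not literal
output of this revision):

    // Illustrative example (not from this patch):
    struct Big { int data[16]; };   // returned indirectly via hidden sret
    Big make();

    void use() {
      Big b = make();  // With a ReturnValueSlot for 'b', the hidden sret
                       // argument can be the address of 'b' itself; with a
                       // null slot, EmitCall allocates a temporary and the
                       // result is then copied into 'b', costing an extra
                       // alloca plus an extra copy.
      (void)b;
    }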
 
   assert(CallInfo.arg_size() == CallArgs.size() &&
          "Mismatch between function signature & arguments.");
   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
        I != E; ++I, ++info_it) {
     const ABIArgInfo &ArgInfo = info_it->info;
     RValue RV = I->first;

--- 132 unchanged lines hidden ---

   case ABIArgInfo::Extend:
   case ABIArgInfo::Direct:
     if (RetTy->isAnyComplexType()) {
       llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
       llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
       return RValue::getComplex(std::make_pair(Real, Imag));
     }
     if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
-      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
-      Builder.CreateStore(CI, V);
-      return RValue::getAggregate(V);
+      llvm::Value *DestPtr = ReturnValue.getValue();
+      bool DestIsVolatile = ReturnValue.isVolatile();
+
+      if (!DestPtr) {
+        DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
+        DestIsVolatile = false;
+      }
+      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+      return RValue::getAggregate(DestPtr);
     }
     return RValue::get(CI);
 
   case ABIArgInfo::Ignore:
     // If we are ignoring an argument that had a result, make sure to
     // construct the appropriate return value for our caller.
     return GetUndefRValue(RetTy);
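
For context on the Ignore case: it covers results with no machine-level
representation. A plausible instance (whether a given type is classified
this way is up to the target's ABIInfo):

    // Illustrative example (not from this patch):
    struct Empty {};
    Empty g();   // may well lower to: declare void @g()

    void h() {
      Empty e = g();  // No call result exists at the IR level, yet the
      (void)e;        // frontend still needs an RValue for 'e';
                      // GetUndefRValue(RetTy) fabricates a placeholder.
    }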
 
   case ABIArgInfo::Coerce: {
-    // FIXME: Avoid the conversion through memory if possible.
-    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
-    CreateCoercedStore(CI, V, *this);
+    llvm::Value *DestPtr = ReturnValue.getValue();
+    bool DestIsVolatile = ReturnValue.isVolatile();
+
+    if (!DestPtr) {
+      DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
+      DestIsVolatile = false;
+    }
+
+    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
     if (RetTy->isAnyComplexType())
-      return RValue::getComplex(LoadComplexFromAddr(V, false));
+      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
-      return RValue::getAggregate(V);
-    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
+      return RValue::getAggregate(DestPtr);
+    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
   }
 
   case ABIArgInfo::Expand:
     assert(0 && "Invalid ABI kind for return argument");
   }
 
   assert(0 && "Unhandled ABIArgInfo::Kind");
   return RValue::get(0);
 }
 
 /* VarArg handling */
 
 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
 }
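
EmitVAArg forwards to the target's ABIInfo because va_list layout and va_arg
pointer arithmetic are entirely target-specific. For reference, the C
construct this single hook lowers:

    #include <stdarg.h>

    // Each va_arg below is emitted through CodeGenFunction::EmitVAArg,
    // which delegates the actual va_list manipulation to the ABIInfo.
    int sum(int n, ...) {
      va_list ap;
      va_start(ap, n);
      int total = 0;
      for (int i = 0; i < n; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }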