--- CGCall.cpp  (revision 200583)
+++ CGCall.cpp  (revision 201361)
 //===----- CGCall.cpp - Encapsulate calling convention details --*- C++ -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//

--- 332 unchanged lines hidden ---

 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
 /// where the source and destination may have different types.
 ///
 /// This safely handles the case when the src type is larger than the
 /// destination type; the upper bits of the src will be lost.
 static void CreateCoercedStore(llvm::Value *Src,
                                llvm::Value *DstPtr,
+                               bool DstIsVolatile,
                                CodeGenFunction &CGF) {
   const llvm::Type *SrcTy = Src->getType();
   const llvm::Type *DstTy =
     cast<llvm::PointerType>(DstPtr->getType())->getElementType();

   uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
   uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

   // If the store is legal, just bitcast the src pointer.
   if (SrcSize <= DstSize) {
     llvm::Value *Casted =
       CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
     // FIXME: Use better alignment / avoid requiring aligned store.
-    CGF.Builder.CreateStore(Src, Casted)->setAlignment(1);
+    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
   } else {
     // Otherwise do coercion through memory. This is stupid, but simple.

     // Generally SrcSize is never greater than DstSize, since this means we are
     // losing bits. However, this can happen in cases where the structure has
     // additional padding, for example due to a user-specified alignment.
     //
     // FIXME: Assert that we aren't truncating non-padding bits when we have
     // access to that information.
     llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
     CGF.Builder.CreateStore(Src, Tmp);
     llvm::Value *Casted =
       CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
     llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
     // FIXME: Use better alignment / avoid requiring aligned load.
     Load->setAlignment(1);
-    CGF.Builder.CreateStore(Load, DstPtr);
+    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
   }
 }

 /***/

 bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
   return FI.getReturnInfo().isIndirect();
 }
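Why thread DstIsVolatile through here: when the destination of the coerced store is a volatile lvalue, the store that unpacks the coerced value must itself be volatile, or it could legally be dropped or merged. A minimal C example of input that exercises this path (hypothetical, assuming an ABI such as x86-64 that returns this struct coerced into a single 64-bit scalar):

    /* Hypothetical Clang input, not part of the patch. */
    struct S { int a, b; };
    struct S f(void);

    volatile struct S dest;

    void g(void) {
      /* f()'s result comes back as one i64; the store that expands it
         into 'dest' goes through CreateCoercedStore and must remain
         volatile. */
      dest = f();
    }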

--- 338 unchanged lines hidden ---

       continue;

     case ABIArgInfo::Coerce: {
       assert(AI != Fn->arg_end() && "Argument mismatch!");
       // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
       // result in a new alloca anyway, so we could just store into that
       // directly if we broke the abstraction down more.
       llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(Ty), "coerce");
-      CreateCoercedStore(AI, V, *this);
+      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
       // Match to what EmitParmDecl is expecting for this type.
       if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
         V = EmitLoadOfScalar(V, false, Ty);
         if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
           // This must be a promotion, for something like
           // "void a(x) short x; {..."
           V = EmitScalarConversion(V, Ty, Arg->getType());
         }
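For orientation, the Coerce case handles parameters whose ABI type differs from their C type: a struct small enough to travel in a register arrives in IR as a single integer, and the prolog bounces it through CreateCoercedStore into a temporary alloca so the body can address the parameter normally. That alloca is freshly created and never volatile, which is why the call site above spells out DestIsVolatile=false. A sketch of qualifying C input (hypothetical; assumes an ABI such as x86-64 that passes this struct in one 64-bit register):

    struct S { int a, b; };

    int first(struct S s) { /* 's' arrives as a single i64 ("coerced") */
      return s.a;           /* prolog spilled the i64 into an S-typed alloca */
    }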

--- 60 unchanged lines hidden ---

   if (ArgType->isReferenceType())
     return EmitReferenceBindingToExpr(E, ArgType);

   return EmitAnyExprToTemp(E);
 }

 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                  llvm::Value *Callee,
+                                 ReturnValueSlot ReturnValue,
                                  const CallArgList &CallArgs,
                                  const Decl *TargetDecl) {
   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
   llvm::SmallVector<llvm::Value*, 16> Args;

   // Handle struct-return functions by passing a pointer to the
   // location that we would like to return into.
   QualType RetTy = CallInfo.getReturnType();
   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

   // If the call returns a temporary with struct return, create a temporary
-  // alloca to hold the result.
-  if (CGM.ReturnTypeUsesSret(CallInfo))
-    Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
+  // alloca to hold the result, unless one is given to us.
+  if (CGM.ReturnTypeUsesSret(CallInfo)) {
+    llvm::Value *Value = ReturnValue.getValue();
+    if (!Value)
+      Value = CreateTempAlloca(ConvertTypeForMem(RetTy));
+    Args.push_back(Value);
+  }

   assert(CallInfo.arg_size() == CallArgs.size() &&
          "Mismatch between function signature & arguments.");
   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
   for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
        I != E; ++I, ++info_it) {
     const ABIArgInfo &ArgInfo = info_it->info;
     RValue RV = I->first;
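EmitCall's new ReturnValueSlot parameter carries an optional destination for the call result. This diff exercises only getValue() and isVolatile(); a minimal sketch of an interface consistent with those uses (illustrative only; the real class lives in clang's CGCall.h and is not shown in this diff):

    // Sketch: a possibly-null destination address plus a volatility bit.
    // A default-constructed slot means "no destination; EmitCall should
    // allocate its own temporary".
    class ReturnValueSlot {
      llvm::Value *Value;  // destination address, or 0 for "no slot"
      bool IsVolatile;     // is the destination a volatile lvalue?
    public:
      ReturnValueSlot() : Value(0), IsVolatile(false) {}
      ReturnValueSlot(llvm::Value *V, bool Volatile)
        : Value(V), IsVolatile(Volatile) {}
      llvm::Value *getValue() const { return Value; }
      bool isVolatile() const { return IsVolatile; }
    };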

--- 132 unchanged lines hidden ---

   case ABIArgInfo::Extend:
   case ABIArgInfo::Direct:
     if (RetTy->isAnyComplexType()) {
       llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
       llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
       return RValue::getComplex(std::make_pair(Real, Imag));
     }
     if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
-      llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
-      Builder.CreateStore(CI, V);
-      return RValue::getAggregate(V);
+      llvm::Value *DestPtr = ReturnValue.getValue();
+      bool DestIsVolatile = ReturnValue.isVolatile();
+
+      if (!DestPtr) {
+        DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "agg.tmp");
+        DestIsVolatile = false;
+      }
+      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+      return RValue::getAggregate(DestPtr);
     }
     return RValue::get(CI);

   case ABIArgInfo::Ignore:
     // If we are ignoring an argument that had a result, make sure to
     // construct the appropriate return value for our caller.
     return GetUndefRValue(RetTy);

   case ABIArgInfo::Coerce: {
-    // FIXME: Avoid the conversion through memory if possible.
-    llvm::Value *V = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
-    CreateCoercedStore(CI, V, *this);
+    llvm::Value *DestPtr = ReturnValue.getValue();
+    bool DestIsVolatile = ReturnValue.isVolatile();
+
+    if (!DestPtr) {
+      DestPtr = CreateTempAlloca(ConvertTypeForMem(RetTy), "coerce");
+      DestIsVolatile = false;
+    }
+
+    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
     if (RetTy->isAnyComplexType())
-      return RValue::getComplex(LoadComplexFromAddr(V, false));
+      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
     if (CodeGenFunction::hasAggregateLLVMType(RetTy))
-      return RValue::getAggregate(V);
+      return RValue::getAggregate(DestPtr);
-    return RValue::get(EmitLoadOfScalar(V, false, RetTy));
+    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
   }

   case ABIArgInfo::Expand:
     assert(0 && "Invalid ABI kind for return argument");
   }

   assert(0 && "Unhandled ABIArgInfo::Kind");
   return RValue::get(0);
 }

 /* VarArg handling */

 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
   return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
 }
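Taken together, the change lets a caller that already has a home for an aggregate result hand that address to EmitCall: the sret argument, the Direct aggregate store, and the Coerce store all write into the caller's slot directly, eliminating a temporary alloca plus copy and letting the volatile bit reach the final store. A hypothetical call site (DestAddr, DestIsVolatile, and FnInfo are illustrative names, not from the patch):

    // Emit 'dest = f(...)' straight into dest's storage.
    RValue RV = EmitCall(FnInfo, Callee,
                         ReturnValueSlot(DestAddr, DestIsVolatile),
                         Args, TargetDecl);

    // Callers with no preferred destination pass an empty slot and get
    // the old behavior: EmitCall makes its own temporary and returns it.
    RValue Tmp = EmitCall(FnInfo, Callee, ReturnValueSlot(),
                          Args, TargetDecl);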