diff --git a/src/coreclr/vm/amd64/PInvokeStubs.asm b/src/coreclr/vm/amd64/PInvokeStubs.asm index 9913dbd3c53f5b..c334774b443f02 100644 --- a/src/coreclr/vm/amd64/PInvokeStubs.asm +++ b/src/coreclr/vm/amd64/PInvokeStubs.asm @@ -113,11 +113,10 @@ NESTED_ENTRY VarargPInvokeGenILStub, _TEXT mov r13, PINVOKE_CALLI_SIGTOKEN_REGISTER ; - ; VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie *pVASigCookie, MethodDesc *pMD) + ; VarargPInvokeStubWorker(TransitionBlock* pTransitionBlock, VASigCookie* pVASigCookie) ; lea rcx, [rsp + __PWTB_TransitionBlock] ; pTransitionBlock* mov rdx, PINVOKE_CALLI_SIGTOKEN_REGISTER ; pVASigCookie - mov r8, METHODDESC_REGISTER ; pMD call VarargPInvokeStubWorker ; diff --git a/src/coreclr/vm/amd64/pinvokestubs.S b/src/coreclr/vm/amd64/pinvokestubs.S index 49bc602afee105..79ec380f288d46 100644 --- a/src/coreclr/vm/amd64/pinvokestubs.S +++ b/src/coreclr/vm/amd64/pinvokestubs.S @@ -109,11 +109,10 @@ NESTED_ENTRY VarargPInvokeGenILStub, _TEXT, NoHandler mov r13, PINVOKE_CALLI_SIGTOKEN_REGISTER // - // VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie *pVASigCookie, MethodDesc *pMD) + // VarargPInvokeStubWorker(TransitionBlock* pTransitionBlock, VASigCookie* pVASigCookie) // lea rdi, [rsp + __PWTB_TransitionBlock] // pTransitionBlock* mov rsi, PINVOKE_CALLI_SIGTOKEN_REGISTER // pVASigCookie - mov rdx, METHODDESC_REGISTER // pMD call C_FUNC(VarargPInvokeStubWorker) // diff --git a/src/coreclr/vm/ceeload.cpp b/src/coreclr/vm/ceeload.cpp index adfe056bdbe20b..c2049b36910136 100644 --- a/src/coreclr/vm/ceeload.cpp +++ b/src/coreclr/vm/ceeload.cpp @@ -687,16 +687,6 @@ void Module::Destruct() ReleaseISymUnmanagedReader(); - // Clean up sig cookies - VASigCookieBlock *pVASigCookieBlock = m_pVASigCookieBlock; - while (pVASigCookieBlock) - { - VASigCookieBlock *pNext = pVASigCookieBlock->m_Next; - delete pVASigCookieBlock; - - pVASigCookieBlock = pNext; - } - // Clean up the IL stub cache if (m_pILStubCache != NULL) 
{ @@ -4207,12 +4197,13 @@ static bool MethodSignatureContainsGenericVariables(SigParser& sp) //========================================================================== // Enregisters a VASig. //========================================================================== -VASigCookie *Module::GetVASigCookie(Signature vaSignature, const SigTypeContext* typeContext) +VASigCookie *Module::GetVASigCookie(Signature vaSignature, MethodDesc* pMD, const SigTypeContext* typeContext) { CONTRACT(VASigCookie*) { INSTANCE_CHECK; STANDARD_VM_CHECK; + PRECONDITION(pMD == NULL || pMD->IsPInvoke()); // Only PInvoke methods are embedded in VASig cookies. POSTCONDITION(CheckPointer(RETVAL)); INJECT_FAULT(COMPlusThrowOM()); } @@ -4245,42 +4236,48 @@ VASigCookie *Module::GetVASigCookie(Signature vaSignature, const SigTypeContext* #endif } - VASigCookie *pCookie = GetVASigCookieWorker(this, pLoaderModule, vaSignature, typeContext); + VASigCookie *pCookie = GetVASigCookieWorker(this, pLoaderModule, pMD, vaSignature, typeContext); RETURN pCookie; } -VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoaderModule, Signature vaSignature, const SigTypeContext* typeContext) +VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoaderModule, MethodDesc* pMD, Signature vaSignature, const SigTypeContext* typeContext) { CONTRACT(VASigCookie*) { STANDARD_VM_CHECK; + PRECONDITION(pMD == NULL || pMD->IsPInvoke()); POSTCONDITION(CheckPointer(RETVAL)); INJECT_FAULT(COMPlusThrowOM()); } CONTRACT_END; - VASigCookieBlock *pBlock; - VASigCookie *pCookie; - - pCookie = NULL; + VASigCookie *pCookie = NULL; - // First, see if we already enregistered this sig. + // First, see if we already have a match for this signature. 
// Note that we're outside the lock here, so be a bit careful with our logic + VASigCookieBlock* pBlock; for (pBlock = pLoaderModule->m_pVASigCookieBlock; pBlock != NULL; pBlock = pBlock->m_Next) { - for (UINT i = 0; i < pBlock->m_numcookies; i++) + for (UINT i = 0; i < pBlock->m_numCookies; i++) { - if (pBlock->m_cookies[i].signature.GetRawSig() == vaSignature.GetRawSig()) + VASigCookie* cookieMaybe = &pBlock->m_cookies[i]; + + // Check if the cookie targets the same MethodDesc. + if (cookieMaybe->pMethodDesc != pMD) + continue; + + // Check if the cookie has the same signature. + if (cookieMaybe->signature.GetRawSig() == vaSignature.GetRawSig()) { - _ASSERTE(pBlock->m_cookies[i].classInst.GetNumArgs() == typeContext->m_classInst.GetNumArgs()); - _ASSERTE(pBlock->m_cookies[i].methodInst.GetNumArgs() == typeContext->m_methodInst.GetNumArgs()); + _ASSERTE(cookieMaybe->classInst.GetNumArgs() == typeContext->m_classInst.GetNumArgs()); + _ASSERTE(cookieMaybe->methodInst.GetNumArgs() == typeContext->m_methodInst.GetNumArgs()); bool instMatch = true; - for (DWORD j = 0; j < pBlock->m_cookies[i].classInst.GetNumArgs(); j++) + for (DWORD j = 0; j < cookieMaybe->classInst.GetNumArgs(); j++) { - if (pBlock->m_cookies[i].classInst[j] != typeContext->m_classInst[j]) + if (cookieMaybe->classInst[j] != typeContext->m_classInst[j]) { instMatch = false; break; @@ -4289,9 +4286,9 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad if (instMatch) { - for (DWORD j = 0; j < pBlock->m_cookies[i].methodInst.GetNumArgs(); j++) + for (DWORD j = 0; j < cookieMaybe->methodInst.GetNumArgs(); j++) { - if (pBlock->m_cookies[i].methodInst[j] != typeContext->m_methodInst[j]) + if (cookieMaybe->methodInst[j] != typeContext->m_methodInst[j]) { instMatch = false; break; @@ -4301,7 +4298,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad if (instMatch) { - pCookie = &(pBlock->m_cookies[i]); + pCookie = cookieMaybe; break; } } @@ 
-4321,7 +4318,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad DWORD sizeOfArgs = argit.SizeOfArgStack(); // Prepare instantiation - LoaderAllocator *pLoaderAllocator = pLoaderModule->GetLoaderAllocator(); + LoaderAllocator* pLoaderAllocator = pLoaderModule->GetLoaderAllocator(); DWORD classInstCount = typeContext->m_classInst.GetNumArgs(); DWORD methodInstCount = typeContext->m_methodInst.GetNumArgs(); @@ -4338,26 +4335,27 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad // occasional duplicate cookie instead. // Is the first block in the list full? - if (pLoaderModule->m_pVASigCookieBlock && pLoaderModule->m_pVASigCookieBlock->m_numcookies - < VASigCookieBlock::kVASigCookieBlockSize) + if (pLoaderModule->m_pVASigCookieBlock + && pLoaderModule->m_pVASigCookieBlock->m_numCookies < VASigCookieBlock::kVASigCookieBlockSize) { // Nope, reserve a new slot in the existing block. - pCookie = &(pLoaderModule->m_pVASigCookieBlock->m_cookies[pLoaderModule->m_pVASigCookieBlock->m_numcookies]); + pCookie = &(pLoaderModule->m_pVASigCookieBlock->m_cookies[pLoaderModule->m_pVASigCookieBlock->m_numCookies]); } else { // Yes, create a new block. - VASigCookieBlock *pNewBlock = new VASigCookieBlock(); - + VASigCookieBlock* pNewBlock = (VASigCookieBlock*)(void*)pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(VASigCookieBlock))); pNewBlock->m_Next = pLoaderModule->m_pVASigCookieBlock; - pNewBlock->m_numcookies = 0; + pNewBlock->m_numCookies = 0; pLoaderModule->m_pVASigCookieBlock = pNewBlock; + pCookie = &(pNewBlock->m_cookies[0]); } // Now, fill in the new cookie (assuming we had enough memory to create one.) 
pCookie->pModule = pDefiningModule; - pCookie->pPInvokeILStub = 0; + pCookie->pPInvokeILStub = (PCODE)NULL; + pCookie->pMethodDesc = pMD; pCookie->sizeOfArgs = sizeOfArgs; pCookie->signature = vaSignature; pCookie->pLoaderModule = pLoaderModule; @@ -4366,7 +4364,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad if (classInstCount != 0) { - TypeHandle* pClassInst = (TypeHandle*)(void*)amt.Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(classInstCount) * S_SIZE_T(sizeof(TypeHandle)))); + TypeHandle* pClassInst = (TypeHandle*)amt.Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(classInstCount) * S_SIZE_T(sizeof(TypeHandle)))); for (DWORD i = 0; i < classInstCount; i++) { pClassInst[i] = typeContext->m_classInst[i]; @@ -4376,7 +4374,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad if (methodInstCount != 0) { - TypeHandle* pMethodInst = (TypeHandle*)(void*)amt.Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(methodInstCount) * S_SIZE_T(sizeof(TypeHandle)))); + TypeHandle* pMethodInst = (TypeHandle*)amt.Track(pLoaderAllocator->GetHighFrequencyHeap()->AllocMem(S_SIZE_T(methodInstCount) * S_SIZE_T(sizeof(TypeHandle)))); for (DWORD i = 0; i < methodInstCount; i++) { pMethodInst[i] = typeContext->m_methodInst[i]; @@ -4388,7 +4386,7 @@ VASigCookie *Module::GetVASigCookieWorker(Module* pDefiningModule, Module* pLoad // Finally, now that it's safe for asynchronous readers to see it, // update the count. 
- pLoaderModule->m_pVASigCookieBlock->m_numcookies++; + pLoaderModule->m_pVASigCookieBlock->m_numCookies++; } } diff --git a/src/coreclr/vm/ceeload.h b/src/coreclr/vm/ceeload.h index 3cea27da8fbcee..758fb21026fc40 100644 --- a/src/coreclr/vm/ceeload.h +++ b/src/coreclr/vm/ceeload.h @@ -345,6 +345,7 @@ struct VASigCookie // so please keep this field first unsigned sizeOfArgs; // size of argument list Volatile pPInvokeILStub; // will be use if target is PInvoke (tag == 0) + PTR_MethodDesc pMethodDesc; // Only non-null if this is a PInvoke method PTR_Module pModule; PTR_Module pLoaderModule; Signature signature; @@ -357,7 +358,7 @@ struct VASigCookie // allocation cost and allow proper bookkeeping. // -struct VASigCookieBlock +struct VASigCookieBlock final { enum { #ifdef _DEBUG @@ -368,7 +369,7 @@ struct VASigCookieBlock }; VASigCookieBlock *m_Next; - UINT m_numcookies; + UINT m_numCookies; VASigCookie m_cookies[kVASigCookieBlockSize]; }; @@ -1397,9 +1398,9 @@ class Module : public ModuleBase void NotifyEtwLoadFinished(HRESULT hr); // Enregisters a VASig. 
- VASigCookie *GetVASigCookie(Signature vaSignature, const SigTypeContext* typeContext); + VASigCookie *GetVASigCookie(Signature vaSignature, MethodDesc* pMD, const SigTypeContext* typeContext); private: - static VASigCookie *GetVASigCookieWorker(Module* pDefiningModule, Module* pLoaderModule, Signature vaSignature, const SigTypeContext* typeContext); + static VASigCookie *GetVASigCookieWorker(Module* pDefiningModule, Module* pLoaderModule, MethodDesc* pMD, Signature vaSignature, const SigTypeContext* typeContext); public: #ifndef DACCESS_COMPILE diff --git a/src/coreclr/vm/cgensys.h b/src/coreclr/vm/cgensys.h index fe60438ba6db3c..2e8a6b1a93a161 100644 --- a/src/coreclr/vm/cgensys.h +++ b/src/coreclr/vm/cgensys.h @@ -50,7 +50,7 @@ enum class CallerGCMode // Non-CPU-specific helper functions called by the CPU-dependent code extern "C" PCODE STDCALL PreStubWorker(TransitionBlock * pTransitionBlock, MethodDesc * pMD); -extern "C" void STDCALL VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, MethodDesc * pMD); +extern "C" void STDCALL VarargPInvokeStubWorker(TransitionBlock* pTransitionBlock, VASigCookie* pVASigCookie); extern "C" void STDCALL VarargPInvokeStub(void); extern "C" void STDCALL VarargPInvokeStub_RetBuffArg(void); diff --git a/src/coreclr/vm/codeman.cpp b/src/coreclr/vm/codeman.cpp index 03aaed2aa9515f..8a8b32596d7199 100644 --- a/src/coreclr/vm/codeman.cpp +++ b/src/coreclr/vm/codeman.cpp @@ -2273,13 +2273,13 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap CONTRACT(HeapList *) { THROWS; GC_NOTRIGGER; - POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); + POSTCONDITION((RETVAL != NULL) || !pInfo->GetThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; - size_t reserveSize = pInfo->getReserveSize(); - size_t initialRequestSize = pInfo->getRequestSize(); - const BYTE * loAddr = pInfo->m_loAddr; - const BYTE * hiAddr = pInfo->m_hiAddr; + size_t reserveSize = 
pInfo->GetReserveSize(); + size_t initialRequestSize = pInfo->GetRequestSize(); + const BYTE * loAddr = pInfo->GetLoAddr(); + const BYTE * hiAddr = pInfo->GetHiAddr(); // Make sure that what we are reserving will fix inside a DWORD if (reserveSize != (DWORD) reserveSize) @@ -2290,7 +2290,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap LOG((LF_JIT, LL_INFO100, "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, %sexecutable, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." FMT_ADDR ")\n", - (DWORD) reserveSize, (DWORD) initialRequestSize, pInfo->IsInterpreted() ? "non-" : "", DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr) + (DWORD) reserveSize, (DWORD) initialRequestSize, pInfo->IsInterpreted() ? "non-" : "", DBG_ADDR(pInfo->GetAllocator()), DBG_ADDR(loAddr), DBG_ADDR(hiAddr) )); NewHolder pCodeHeap(new LoaderCodeHeap(!pInfo->IsInterpreted() /* fMakeExecutable */)); @@ -2306,7 +2306,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap allocationSize += pCodeHeap->m_LoaderHeap.AllocMem_TotalSize(JUMP_ALLOCATE_SIZE); } #endif - pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)allocationSize, &dwSizeAcquiredFromInitialBlock); + pBaseAddr = (BYTE *)pInfo->GetAllocator()->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)allocationSize, &dwSizeAcquiredFromInitialBlock); if (pBaseAddr != NULL) { pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE); @@ -2327,7 +2327,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap { #ifdef _DEBUG // Always exercise the fallback path in the caller when forced relocs are turned on - if (!pInfo->getThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs()) + if (!pInfo->GetThrowOnOutOfMemoryWithinRange() && PEDecoder::GetForceRelocs()) RETURN NULL; #endif pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(reserveSize, 
loAddr, hiAddr); @@ -2335,7 +2335,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap if (!pBaseAddr) { // Conserve emergency jump stub reserve until when it is really needed - if (!pInfo->getThrowOnOutOfMemoryWithinRange()) + if (!pInfo->GetThrowOnOutOfMemoryWithinRange()) RETURN NULL; #ifdef TARGET_AMD64 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize); @@ -2410,7 +2410,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap } #endif // TARGET_64BIT - pHp->pLoaderAllocator = pInfo->m_pAllocator; + pHp->pLoaderAllocator = pInfo->GetAllocator(); LOG((LF_JIT, LL_INFO100, "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n", @@ -2441,21 +2441,47 @@ void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD return p; } -void CodeHeapRequestInfo::Init() +CodeHeapRequestInfo::CodeHeapRequestInfo(MethodDesc* pMD) + : CodeHeapRequestInfo{ pMD, NULL, NULL, NULL } { - CONTRACTL { + LIMITED_METHOD_CONTRACT; +} + +CodeHeapRequestInfo::CodeHeapRequestInfo(LoaderAllocator* pAllocator) + : CodeHeapRequestInfo{ NULL, pAllocator, NULL, NULL } +{ + LIMITED_METHOD_CONTRACT; +} + +CodeHeapRequestInfo::CodeHeapRequestInfo(MethodDesc* pMD, LoaderAllocator* pAllocator, BYTE* loAddr, BYTE* hiAddr) + : m_pAllocator{ pAllocator } + , m_loAddr{ loAddr } + , m_hiAddr{ hiAddr } + , m_requestSize{ 0 } + , m_reserveSize{ 0 } + , m_reserveForJumpStubs{ 0 } + , m_isDynamicDomain{ pMD != NULL && pMD->IsDynamicMethod() } + , m_isCollectible{ false } + , m_isInterpreted{ false } + , m_throwOnOutOfMemoryWithinRange{ true } +{ + CONTRACTL + { NOTHROW; GC_NOTRIGGER; - PRECONDITION((m_hiAddr == 0) || - ((m_loAddr < m_hiAddr) && - ((m_loAddr + m_requestSize) < m_hiAddr))); - } CONTRACTL_END; + PRECONDITION((m_hiAddr == 0) + || ((m_loAddr < m_hiAddr) + && ((m_loAddr + m_requestSize) < m_hiAddr))); + } + CONTRACTL_END; + + if (m_pAllocator == NULL && pMD != 
NULL) + { + m_pAllocator = pMD->GetLoaderAllocator(); + } - if (m_pAllocator == NULL) - m_pAllocator = m_pMD->GetLoaderAllocator(); - m_isDynamicDomain = (m_pMD != NULL) && m_pMD->IsLCGMethod(); - m_isCollectible = m_pAllocator->IsCollectible(); - m_throwOnOutOfMemoryWithinRange = true; + if (m_pAllocator != NULL) + m_isCollectible = m_pAllocator->IsCollectible(); } #ifdef FEATURE_EH_FUNCLETS @@ -2506,14 +2532,14 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapLock.OwnedByCurrentThread()); - POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); + POSTCONDITION((RETVAL != NULL) || !pInfo->GetThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; - size_t initialRequestSize = pInfo->getRequestSize(); + size_t initialRequestSize = pInfo->GetRequestSize(); size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB) #ifdef HOST_64BIT - if (pInfo->m_hiAddr == 0) + if (pInfo->GetHiAddr() == 0) { if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD) { @@ -2542,7 +2568,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe reserveSize = minReserveSize; reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); - pInfo->setReserveSize(reserveSize); + pInfo->SetReserveSize(reserveSize); HeapList *pHp = NULL; @@ -2569,7 +2595,7 @@ HeapList* EECodeGenManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHe } if (pHp == NULL) { - _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); + _ASSERTE(!pInfo->GetThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } @@ -2636,10 +2662,10 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, THROWS; GC_NOTRIGGER; PRECONDITION(m_CodeHeapLock.OwnedByCurrentThread()); - POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); + POSTCONDITION((RETVAL != NULL) || !pInfo->GetThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; - 
pInfo->setRequestSize(header+blockSize+(align-1)+pInfo->getReserveForJumpStubs()); + pInfo->SetRequestSize(header+blockSize+(align-1)+pInfo->GetReserveForJumpStubs()); void * mem = NULL; HeapList * pCodeHeap = NULL; @@ -2651,14 +2677,14 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, #ifdef FEATURE_INTERPRETER if (pInfo->IsInterpreted()) { - pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedInterpreterDynamicCodeHeap; - pInfo->m_pAllocator->m_pLastUsedInterpreterDynamicCodeHeap = NULL; + pCodeHeap = (HeapList *)pInfo->GetAllocator()->m_pLastUsedInterpreterDynamicCodeHeap; + pInfo->GetAllocator()->m_pLastUsedInterpreterDynamicCodeHeap = NULL; } else #endif // FEATURE_INTERPRETER { - pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap; - pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL; + pCodeHeap = (HeapList *)pInfo->GetAllocator()->m_pLastUsedDynamicCodeHeap; + pInfo->GetAllocator()->m_pLastUsedDynamicCodeHeap = NULL; } } else @@ -2666,26 +2692,26 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, #ifdef FEATURE_INTERPRETER if (pInfo->IsInterpreted()) { - pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedInterpreterCodeHeap; - pInfo->m_pAllocator->m_pLastUsedInterpreterCodeHeap = NULL; + pCodeHeap = (HeapList *)pInfo->GetAllocator()->m_pLastUsedInterpreterCodeHeap; + pInfo->GetAllocator()->m_pLastUsedInterpreterCodeHeap = NULL; } else #endif // FEATURE_INTERPRETER { - pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap; - pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL; + pCodeHeap = (HeapList *)pInfo->GetAllocator()->m_pLastUsedCodeHeap; + pInfo->GetAllocator()->m_pLastUsedCodeHeap = NULL; } } // If we will use a cached code heap, ensure that the code heap meets the constraints if (pCodeHeap && CanUseCodeHeap(pInfo, pCodeHeap)) { - mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); + mem = 
(pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->GetReserveForJumpStubs()); } if (mem == NULL) { - pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator); + pList = GetCodeHeapList(pInfo, pInfo->GetAllocator()); if (pList != NULL) { for (int i = 0; i < pList->m_CodeHeapList.Count(); i++) @@ -2695,7 +2721,7 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, // Validate that the code heap can be used for the current request if (CanUseCodeHeap(pInfo, pCodeHeap)) { - mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); + mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->GetReserveForJumpStubs()); if (mem != NULL) break; } @@ -2709,18 +2735,18 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, { // not found so need to create the first one pList = CreateCodeHeapList(pInfo); - _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->m_pAllocator)); + _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->GetAllocator())); } _ASSERTE(pList); pCodeHeap = NewCodeHeap(pInfo, pList); if (pCodeHeap == NULL) { - _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); + _ASSERTE(!pInfo->GetThrowOnOutOfMemoryWithinRange()); RETURN(NULL); } - mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->getReserveForJumpStubs()); + mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align, pInfo->GetReserveForJumpStubs()); if (mem == NULL) ThrowOutOfMemory(); _ASSERTE(mem); @@ -2732,12 +2758,12 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, #ifdef FEATURE_INTERPRETER if (pInfo->IsInterpreted()) { - pInfo->m_pAllocator->m_pLastUsedInterpreterDynamicCodeHeap = pCodeHeap; + pInfo->GetAllocator()->m_pLastUsedInterpreterDynamicCodeHeap = pCodeHeap; } else #endif // FEATURE_INTERPRETER { - pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap; + 
pInfo->GetAllocator()->m_pLastUsedDynamicCodeHeap = pCodeHeap; } } else @@ -2745,12 +2771,12 @@ void* EECodeGenManager::AllocCodeWorker(CodeHeapRequestInfo *pInfo, #ifdef FEATURE_INTERPRETER if (pInfo->IsInterpreted()) { - pInfo->m_pAllocator->m_pLastUsedInterpreterCodeHeap = pCodeHeap; + pInfo->GetAllocator()->m_pLastUsedInterpreterCodeHeap = pCodeHeap; } else #endif // FEATURE_INTERPRETER { - pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap; + pInfo->GetAllocator()->m_pLastUsedCodeHeap = pCodeHeap; } } @@ -2830,7 +2856,7 @@ void EECodeGenManager::AllocCode(MethodDesc* pMD, size_t blockSize, size_t reser else #endif // FEATURE_INTERPRETER { - requestInfo.setReserveForJumpStubs(reserveForJumpStubs); + requestInfo.SetReserveForJumpStubs(reserveForJumpStubs); #ifdef FEATURE_EH_FUNCLETS realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos); @@ -2900,7 +2926,20 @@ void EECodeGenManager::AllocCode(MethodDesc* pMD, size_t blockSize, size_t reser pCodeHdrRW->SetDebugInfo(NULL); pCodeHdrRW->SetEHInfo(NULL); pCodeHdrRW->SetGCInfo(NULL); - pCodeHdrRW->SetMethodDesc(pMD); + + // We want the target MethodDesc to be the same as the one in the code header. + MethodDesc* pMDTarget = pMD; + if (pMD->IsILStub()) + { + DynamicMethodDesc* pDMD = pMD->AsDynamicMethodDesc(); + + // If the IL Stub is a P/Invoke stub, set the CodeHeader's MethodDesc + // to be the real target method and not the stub. 
+ if (pDMD->IsPInvokeStub()) + pMDTarget = pDMD->GetILStubResolver()->GetStubTargetMethodDesc(); + } + pCodeHdrRW->SetMethodDesc(pMDTarget); + #ifdef FEATURE_EH_FUNCLETS if (std::is_same::value) { @@ -2989,7 +3028,7 @@ bool EECodeGenManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCod PRECONDITION(m_CodeHeapLock.OwnedByCurrentThread()); } CONTRACTL_END; - if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0)) + if ((pInfo->GetLoAddr() == 0) && (pInfo->GetHiAddr() == 0)) { // We have no constraint so this non empty heap will be able to satisfy our request if (pInfo->IsDynamicDomain()) @@ -3002,7 +3041,7 @@ bool EECodeGenManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCod BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize; BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; - BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; + BYTE * hiRequestAddr = loRequestAddr + pInfo->GetRequestSize() + BYTES_PER_BUCKET; if (hiRequestAddr <= lastAddr - pCodeHeap->reserveForJumpStubs) { return true; @@ -3035,10 +3074,10 @@ bool EECodeGenManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCod // when calling AllocMemory with a DynamicDomain // [firstaddr .. lastAddr] must be entirely within - // [pInfo->m_loAddr .. pInfo->m_hiAddr] + // [pInfo->GetLoAddr() .. 
pInfo->GetHiAddr()] // - if ((pInfo->m_loAddr <= firstAddr) && - (lastAddr <= pInfo->m_hiAddr)) + if ((pInfo->GetLoAddr() <= firstAddr) && + (lastAddr <= pInfo->GetHiAddr())) { // This heap will always satisfy our constraint return true; @@ -3050,18 +3089,18 @@ bool EECodeGenManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCod // next allocation request into [loRequestAddr..hiRequestAddr] // BYTE * loRequestAddr = (BYTE *) pCodeHeap->endAddress; - BYTE * hiRequestAddr = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET; + BYTE * hiRequestAddr = loRequestAddr + pInfo->GetRequestSize() + BYTES_PER_BUCKET; _ASSERTE(loRequestAddr <= hiRequestAddr); // loRequestAddr and hiRequestAddr must be entirely within - // [pInfo->m_loAddr .. pInfo->m_hiAddr] + // [pInfo->GetLoAddr() .. pInfo->GetHiAddr()] // - if ((pInfo->m_loAddr <= loRequestAddr) && - (hiRequestAddr <= pInfo->m_hiAddr)) + if ((pInfo->GetLoAddr() <= loRequestAddr) && + (hiRequestAddr <= pInfo->GetHiAddr())) { // Additionally hiRequestAddr must also be less than or equal to lastAddr. // If throwOnOutOfMemoryWithinRange is not set, conserve reserveForJumpStubs until when it is really needed. - if (hiRequestAddr <= lastAddr - (pInfo->getThrowOnOutOfMemoryWithinRange() ? 0 : pCodeHeap->reserveForJumpStubs)) + if (hiRequestAddr <= lastAddr - (pInfo->GetThrowOnOutOfMemoryWithinRange() ? 0 : pCodeHeap->reserveForJumpStubs)) { // This heap will be able to satisfy our constraint return true; @@ -3082,7 +3121,7 @@ EEJitManager::DomainCodeHeapList* EECodeGenManager::CreateCodeHeapList(CodeHeapR PRECONDITION(m_CodeHeapLock.OwnedByCurrentThread()); } CONTRACTL_END; - NewHolder pNewList(new DomainCodeHeapList(pInfo->m_pAllocator)); + NewHolder pNewList(new DomainCodeHeapList(pInfo->GetAllocator())); DomainCodeHeapList** ppList = pInfo->IsDynamicDomain() ? 
m_DynamicDomainCodeHeaps.AppendThrowing() @@ -3110,21 +3149,23 @@ JumpStubBlockHeader * EEJitManager::AllocJumpStubBlock(MethodDesc* pMD, DWORD n LoaderAllocator *pLoaderAllocator, bool throwOnOutOfMemoryWithinRange) { - CONTRACT(JumpStubBlockHeader *) { + CONTRACT(JumpStubBlockHeader *) + { THROWS; GC_NOTRIGGER; PRECONDITION(loAddr < hiAddr); PRECONDITION(pLoaderAllocator != NULL); POSTCONDITION((RETVAL != NULL) || !throwOnOutOfMemoryWithinRange); - } CONTRACT_END; + } + CONTRACT_END; _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0); size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE; HeapList *pCodeHeap = NULL; - CodeHeapRequestInfo requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr); - requestInfo.setThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange); + CodeHeapRequestInfo requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr); + requestInfo.SetThrowOnOutOfMemoryWithinRange(throwOnOutOfMemoryWithinRange); TADDR mem; ExecutableWriterHolderNoLog blockWriterHolder; @@ -3170,20 +3211,22 @@ JumpStubBlockHeader * EEJitManager::AllocJumpStubBlock(MethodDesc* pMD, DWORD n void * EEJitManager::AllocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind) { - CONTRACT(void *) { + CONTRACT(void *) + { THROWS; GC_NOTRIGGER; PRECONDITION(pLoaderAllocator != NULL); POSTCONDITION(CheckPointer(RETVAL)); - } CONTRACT_END; + } + CONTRACT_END; HeapList *pCodeHeap = NULL; - CodeHeapRequestInfo requestInfo(NULL, pLoaderAllocator, NULL, NULL); + CodeHeapRequestInfo requestInfo(pLoaderAllocator); #ifdef TARGET_AMD64 // CodeFragments are pretty much always Precodes that may need to be patched with jump stubs at some point in future // We will assume the worst case that every FixupPrecode will need to be patched and reserve the jump stubs accordingly - requestInfo.setReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE); + 
requestInfo.SetReserveForJumpStubs((blockSize / 8) * JUMP_ALLOCATE_SIZE); #endif TADDR mem; @@ -3202,7 +3245,7 @@ void * EEJitManager::AllocCodeFragmentBlock(size_t blockSize, unsigned alignment NibbleMapSetUnlocked(pCodeHeap, mem, blockSize); // Record the jump stub reservation - pCodeHeap->reserveForJumpStubs += requestInfo.getReserveForJumpStubs(); + pCodeHeap->reserveForJumpStubs += requestInfo.GetReserveForJumpStubs(); } RETURN((void *)mem); @@ -6317,7 +6360,7 @@ size_t ReadyToRunJitManager::WalkILOffsets( BoundsType boundsType, void* pContext, size_t (* pfnWalkILOffsets)(ICorDebugInfo::OffsetMapping *pOffsetMapping, void *pContext)) -{ +{ CONTRACTL { THROWS; // on OOM. GC_NOTRIGGER; // getting vars shouldn't trigger diff --git a/src/coreclr/vm/codeman.h b/src/coreclr/vm/codeman.h index 47304d833fc0ea..bcb2e5979c17c2 100644 --- a/src/coreclr/vm/codeman.h +++ b/src/coreclr/vm/codeman.h @@ -404,12 +404,11 @@ typedef DPTR(InterpreterCodeHeader) PTR_InterpreterCodeHeader; // When creating new JumpStubs we have a constarint that the address used // should be in the range [loAddr..hiAddr] // -struct CodeHeapRequestInfo +class CodeHeapRequestInfo final { - MethodDesc * m_pMD; LoaderAllocator* m_pAllocator; - const BYTE * m_loAddr; // lowest address to use to satisfy our request (0 -- don't care) - const BYTE * m_hiAddr; // hihest address to use to satisfy our request (0 -- don't care) + const BYTE* m_loAddr; // lowest address to use to satisfy our request (0 -- don't care) + const BYTE* m_hiAddr; // highest address to use to satisfy our request (0 -- don't care) size_t m_requestSize; // minimum size that must be made available size_t m_reserveSize; // Amount that VirtualAlloc will reserved size_t m_reserveForJumpStubs; // Amount to reserve for jump stubs (won't be allocated) @@ -418,42 +417,35 @@ struct CodeHeapRequestInfo bool m_isInterpreted; bool m_throwOnOutOfMemoryWithinRange; +public: + CodeHeapRequestInfo(MethodDesc* pMD); + 
CodeHeapRequestInfo(LoaderAllocator* pAllocator); + CodeHeapRequestInfo(MethodDesc* pMD, LoaderAllocator* pAllocator, BYTE* loAddr, BYTE* hiAddr); + + LoaderAllocator* GetAllocator() { return m_pAllocator; } + bool IsDynamicDomain() { return m_isDynamicDomain; } void SetDynamicDomain() { m_isDynamicDomain = true; } + const BYTE* GetLoAddr() { return m_loAddr; } + const BYTE* GetHiAddr() { return m_hiAddr; } + bool IsCollectible() { return m_isCollectible; } - bool IsInterpreted() { return m_isInterpreted; } + bool IsInterpreted() { return m_isInterpreted; } void SetInterpreted() { m_isInterpreted = true; } - size_t getRequestSize() { return m_requestSize; } - void setRequestSize(size_t requestSize) { m_requestSize = requestSize; } - - size_t getReserveSize() { return m_reserveSize; } - void setReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; } - - size_t getReserveForJumpStubs() { return m_reserveForJumpStubs; } - void setReserveForJumpStubs(size_t size) { m_reserveForJumpStubs = size; } - - bool getThrowOnOutOfMemoryWithinRange() { return m_throwOnOutOfMemoryWithinRange; } - void setThrowOnOutOfMemoryWithinRange(bool value) { m_throwOnOutOfMemoryWithinRange = value; } + size_t GetRequestSize() { return m_requestSize; } + void SetRequestSize(size_t requestSize) { m_requestSize = requestSize; } - void Init(); + size_t GetReserveSize() { return m_reserveSize; } + void SetReserveSize(size_t reserveSize) { m_reserveSize = reserveSize; } - CodeHeapRequestInfo(MethodDesc *pMD) - : m_pMD(pMD), m_pAllocator(0), - m_loAddr(0), m_hiAddr(0), - m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0) - , m_isInterpreted(false) - { WRAPPER_NO_CONTRACT; Init(); } + size_t GetReserveForJumpStubs() { return m_reserveForJumpStubs; } + void SetReserveForJumpStubs(size_t size) { m_reserveForJumpStubs = size; } - CodeHeapRequestInfo(MethodDesc *pMD, LoaderAllocator* pAllocator, - BYTE * loAddr, BYTE * hiAddr) - : m_pMD(pMD), m_pAllocator(pAllocator), - 
m_loAddr(loAddr), m_hiAddr(hiAddr), - m_requestSize(0), m_reserveSize(0), m_reserveForJumpStubs(0) - , m_isInterpreted(false) - { WRAPPER_NO_CONTRACT; Init(); } + bool GetThrowOnOutOfMemoryWithinRange() { return m_throwOnOutOfMemoryWithinRange; } + void SetThrowOnOutOfMemoryWithinRange(bool value) { m_throwOnOutOfMemoryWithinRange = value; } }; //----------------------------------------------------------------------------- diff --git a/src/coreclr/vm/comdelegate.cpp b/src/coreclr/vm/comdelegate.cpp index cc9535ca13f6ed..5f32b1891a206f 100644 --- a/src/coreclr/vm/comdelegate.cpp +++ b/src/coreclr/vm/comdelegate.cpp @@ -1535,7 +1535,7 @@ void COMDelegate::ValidateDelegatePInvoke(MethodDesc* pMD) if (pMD->IsSynchronized()) COMPlusThrow(kTypeLoadException, IDS_EE_NOSYNCHRONIZED); - if (pMD->MethodDesc::IsVarArg()) + if (pMD->IsVarArg()) COMPlusThrow(kNotSupportedException, IDS_EE_VARARG_NOT_SUPPORTED); } diff --git a/src/coreclr/vm/dllimport.cpp b/src/coreclr/vm/dllimport.cpp index e6171f60ebebe2..32bba775508df3 100644 --- a/src/coreclr/vm/dllimport.cpp +++ b/src/coreclr/vm/dllimport.cpp @@ -209,6 +209,33 @@ StubSigDesc::StubSigDesc(const Signature& sig, Module* pModule) #ifndef DACCESS_COMPILE +static bool IsSharedStubScenario(DWORD dwStubFlags) +{ + WRAPPER_NO_CONTRACT; + + if (SF_IsTailCallStoreArgsStub(dwStubFlags) || SF_IsTailCallCallTargetStub(dwStubFlags)) + { + return false; + } + + if (SF_IsFieldGetterStub(dwStubFlags) || SF_IsFieldSetterStub(dwStubFlags)) + { + return false; + } + + if (SF_IsAsyncResumeStub(dwStubFlags)) + { + return false; + } + + if (SF_IsForwardPInvokeStub(dwStubFlags) && !SF_IsCALLIStub(dwStubFlags)) + { + return false; + } + + return true; +} + class StubState { public: @@ -219,7 +246,7 @@ class StubState virtual void MarshalLCID(int argIdx) = 0; virtual void MarshalField(MarshalInfo* pInfo, UINT32 managedOffset, UINT32 nativeOffset, FieldDesc* pFieldDesc) = 0; - virtual void EmitInvokeTarget(MethodDesc *pStubMD) = 0; + virtual void 
EmitInvokeTarget(MethodDesc* pTargetMD, MethodDesc* pStubMD) = 0; virtual void FinishEmit(MethodDesc* pMD) = 0; @@ -241,13 +268,24 @@ class ILStubState : public StubState int iLCIDParamIdx, MethodDesc* pTargetMD) : m_slIL(dwStubFlags, pStubModule, signature, pTypeContext, pTargetMD, iLCIDParamIdx) - , m_dwStubFlags(dwStubFlags) + , m_dwStubFlags(UpdateStubFlags(dwStubFlags)) { STANDARD_VM_CONTRACT; m_fSetLastError = 0; } +private: + static DWORD UpdateStubFlags(DWORD dwStubFlags) + { + WRAPPER_NO_CONTRACT; + + if (IsSharedStubScenario(dwStubFlags)) + dwStubFlags |= PINVOKESTUB_FL_SHARED_STUB; + + return dwStubFlags; + } + public: void SetLastError(BOOL fSetLastError) { @@ -489,11 +527,11 @@ class ILStubState : public StubState pStubMD->AsDynamicMethodDesc()->SetStoredMethodSig(pNewSig, cbNewSig); } - void EmitInvokeTarget(MethodDesc *pStubMD) + void EmitInvokeTarget(MethodDesc* pTargetMD, MethodDesc* pStubMD) { STANDARD_VM_CONTRACT; - m_slIL.DoPInvoke(m_slIL.GetDispatchCodeStream(), m_dwStubFlags, pStubMD); + m_slIL.DoPInvoke(m_slIL.GetDispatchCodeStream(), m_dwStubFlags, pTargetMD); } virtual void EmitExceptionHandler(LocalDesc* pNativeReturnType, LocalDesc* pManagedReturnType, @@ -709,7 +747,7 @@ class ILStubState : public StubState } // Invoke the target (calli, call method, call delegate, get/set field, etc.) - EmitInvokeTarget(pStubMD); + EmitInvokeTarget(m_slIL.GetTargetMD(), pStubMD); // Saving last error must be the first thing we do after returning from the target if (m_fSetLastError && SF_IsForwardStub(m_dwStubFlags)) @@ -808,13 +846,18 @@ class ILStubState : public StubState } else if (SF_IsStructMarshalStub(m_dwStubFlags)) { - // Struct marshal stubs don't actually call anything so they do not need the secrect parameter. + // Struct marshal stubs don't actually call anything so they do not need the secret parameter. 
} else if (SF_IsForwardDelegateStub(m_dwStubFlags)) { // Forward delegate stubs get all the context they need in 'this' so they // don't use the secret parameter. } + else if (SF_IsForwardPInvokeStub(m_dwStubFlags) + && !SF_IsCALLIStub(m_dwStubFlags)) + { + // Forward stubs (i.e., PInvokes) don't use the secret parameter + } else { // All other IL stubs will need to use the secret parameter. @@ -1559,7 +1602,7 @@ class COMToCLRFieldAccess_ILStubState : public COMToCLR_ILStubState m_pFD = pFD; } - void EmitInvokeTarget(MethodDesc *pStubMD) + void EmitInvokeTarget(MethodDesc* pTargetMD, MethodDesc* pStubMD) { STANDARD_VM_CONTRACT; @@ -2084,7 +2127,7 @@ void PInvokeStubLinker::End(DWORD dwStubFlags) } } -void PInvokeStubLinker::DoPInvoke(ILCodeStream *pcsEmit, DWORD dwStubFlags, MethodDesc * pStubMD) +void PInvokeStubLinker::DoPInvoke(ILCodeStream *pcsEmit, DWORD dwStubFlags, MethodDesc* pMD) { STANDARD_VM_CONTRACT; @@ -2103,38 +2146,32 @@ void PInvokeStubLinker::DoPInvoke(ILCodeStream *pcsEmit, DWORD dwStubFlags, Meth pcsEmit->EmitLoadThis(); pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_DELEGATE_TARGET, 1, 1); } - else // direct invocation - { - if (SF_IsCALLIStub(dwStubFlags)) // unmanaged CALLI - { - // for managed-to-unmanaged CALLI that requires marshaling, the target is passed - // as the secret argument to the stub by GenericPInvokeCalliHelper (asmhelpers.asm) - EmitLoadStubContext(pcsEmit, dwStubFlags); -#ifdef TARGET_64BIT - // the secret arg has been shifted to left and ORed with 1 (see code:GenericPInvokeCalliHelper) - pcsEmit->EmitLDC(1); - pcsEmit->EmitSHR_UN(); -#endif - } - else -#ifdef FEATURE_COMINTEROP - if (!SF_IsCOMStub(dwStubFlags)) // forward P/Invoke -#endif // FEATURE_COMINTEROP - { - EmitLoadStubContext(pcsEmit, dwStubFlags); - - pcsEmit->EmitLDC(offsetof(PInvokeMethodDesc, m_pPInvokeTarget)); - pcsEmit->EmitADD(); - pcsEmit->EmitLDIND_I(); - } #ifdef FEATURE_COMINTEROP - else - { - // this is a CLR -> COM call - // the target has been computed 
by StubHelpers::GetCOMIPFromRCW - pcsEmit->EmitLDLOC(m_dwTargetEntryPointLocalNum); - } + else if (SF_IsCOMStub(dwStubFlags)) + { + // this is a CLR -> COM call + // the target has been computed by StubHelpers::GetCOMIPFromRCW + pcsEmit->EmitLDLOC(m_dwTargetEntryPointLocalNum); + } #endif // FEATURE_COMINTEROP + else if (SF_IsCALLIStub(dwStubFlags)) // unmanaged CALLI + { + // for managed-to-unmanaged CALLI that requires marshaling, the target is passed + // as the secret argument to the stub by GenericPInvokeCalliHelper (asmhelpers.asm) + EmitLoadStubContext(pcsEmit, dwStubFlags); +#ifdef TARGET_64BIT + // the secret arg has been shifted to left and ORed with 1 (see code:GenericPInvokeCalliHelper) + pcsEmit->EmitLDC(1); + pcsEmit->EmitSHR_UN(); +#endif // TARGET_64BIT + } + else // forward P/Invoke + { + _ASSERTE(pMD->IsPInvoke()); + PInvokeMethodDesc* pTargetMD = (PInvokeMethodDesc*)pMD; + pcsEmit->EmitLDC((DWORD_PTR)&pTargetMD->m_pPInvokeTarget); + pcsEmit->EmitCONV_I(); + pcsEmit->EmitLDIND_I(); } } else // native-to-managed @@ -2178,7 +2215,7 @@ void PInvokeStubLinker::EmitLogNativeArgument(ILCodeStream* pslILEmit, DWORD dwP { STANDARD_VM_CONTRACT; - if (SF_IsForwardPInvokeStub(m_dwStubFlags) && !SF_IsForwardDelegateStub(m_dwStubFlags)) + if (SF_IsCALLIStub(m_dwStubFlags)) { // get the secret argument via intrinsic pslILEmit->EmitCALL(METHOD__STUBHELPERS__GET_STUB_CONTEXT, 0, 1); @@ -2235,21 +2272,30 @@ DWORD PInvokeStubLinker::EmitProfilerBeginTransitionCallback(ILCodeStream* pcsEm { STANDARD_VM_CONTRACT; - if (!SF_IsForwardDelegateStub(dwStubFlags) && !SF_IsCALLIStub(dwStubFlags)) - { - // COM interop or the pinvoke case, should have a non-null 'secret argument'. 
- EmitLoadStubContext(pcsEmit, dwStubFlags); - } - else if (SF_IsDelegateStub(dwStubFlags)) + if (SF_IsDelegateStub(dwStubFlags)) { // In the unmanaged delegate case, we use the "this" object to retrieve the MD _ASSERTE(SF_IsForwardStub(dwStubFlags)); pcsEmit->EmitLoadThis(); pcsEmit->EmitCALL(METHOD__DELEGATE__GET_INVOKE_METHOD, 1, 1); } +#ifdef FEATURE_COMINTEROP + else if (SF_IsCOMStub(dwStubFlags)) + { + // COM interop should have a non-null 'secret argument'. + EmitLoadStubContext(pcsEmit, dwStubFlags); + } +#endif // FEATURE_COMINTEROP + else if (SF_IsForwardPInvokeStub(dwStubFlags) && !SF_IsCALLIStub(dwStubFlags)) + { + MethodDesc* pMD = GetTargetMD(); + _ASSERTE(pMD != NULL && pMD->IsPInvoke()); + pcsEmit->EmitLDC((DWORD_PTR)pMD); + pcsEmit->EmitCONV_I(); + } else { - // It is the calli pinvoke case, so pass null. + // Some other stub without the MD as the secret parameter, so pass null. pcsEmit->EmitLoadNullPtr(); } @@ -2283,14 +2329,15 @@ void PInvokeStubLinker::EmitValidateLocal(ILCodeStream* pcsEmit, DWORD dwLocalNu pcsEmit->EmitLoadThis(); pcsEmit->EmitCALL(METHOD__DELEGATE__GET_INVOKE_METHOD, 1, 1); } - else if (SF_IsCALLIStub(dwStubFlags)) +#ifdef FEATURE_COMINTEROP + else if (SF_IsCOMStub(dwStubFlags)) { - pcsEmit->EmitLoadNullPtr(); + EmitLoadStubContext(pcsEmit, dwStubFlags); } +#endif // FEATURE_COMINTEROP else { - // P/Invoke, CLR->COM - EmitLoadStubContext(pcsEmit, dwStubFlags); + pcsEmit->EmitLoadNullPtr(); } if (fIsByref) @@ -2427,7 +2474,7 @@ class DispatchStubState : public StubState // For CLR-to-COM late-bound/eventing } #endif // FEATURE_COMINTEROP - void EmitInvokeTarget(MethodDesc *pStubMD) + void EmitInvokeTarget(MethodDesc* pTargetMD, MethodDesc* pStubMD) { LIMITED_METHOD_CONTRACT; UNREACHABLE_MSG("Should never come to DispatchStubState::EmitInvokeTarget"); @@ -3992,10 +4039,13 @@ namespace INT32 m_iLCIDArg; INT32 m_nParams; - BYTE m_rgbSigAndParamData[1]; + + // Fields added to the end of the blob (see CreateHashBlob): + // // 
(dwParamAttr, cbNativeType) // length: number of parameters // NativeTypeBlob // length: number of parameters // BYTE m_rgbSigData[]; // length: determined by sig walk + BYTE m_rgbSigAndParamData[1]; }; // For better performance and less memory fragmentation, @@ -4355,7 +4405,7 @@ namespace CONTRACTL_END; WORD ndirectflags = 0; - if (pNMD->MethodDesc::IsVarArg()) + if (pNMD->IsVarArg()) ndirectflags |= PInvokeMethodDesc::kVarArgs; if (sigInfo.GetCharSet() == nltAnsi) @@ -4936,7 +4986,7 @@ namespace UNREACHABLE_MSG("unexpected deadlock in IL stub generation!"); } - if (SF_IsSharedStub(params.m_dwStubFlags)) + if (SF_IsSharedStub(dwStubFlags)) { // We need to re-acquire the lock in case we need to get a new pStubMD // in the case that the owner of the shared stub was destroyed. @@ -5908,65 +5958,9 @@ EXTERN_C LPVOID STDCALL PInvokeImportWorker(PInvokeMethodDesc* pMD) // Support for Pinvoke Calli instruction // //=========================================================================== - -EXTERN_C void STDCALL VarargPInvokeStubWorker(TransitionBlock * pTransitionBlock, VASigCookie *pVASigCookie, MethodDesc *pMD) +static void GetILStubForCalli(VASigCookie* pVASigCookie, MethodDesc* pMD) { - PreserveLastErrorHolder preserveLastError; - - STATIC_CONTRACT_THROWS; - STATIC_CONTRACT_GC_TRIGGERS; - STATIC_CONTRACT_MODE_COOPERATIVE; - STATIC_CONTRACT_ENTRY_POINT; - - MAKE_CURRENT_THREAD_AVAILABLE(); - -#ifdef _DEBUG - Thread::ObjectRefFlush(CURRENT_THREAD); -#endif - - PrestubMethodFrame frame(pTransitionBlock, pMD); - PrestubMethodFrame * pFrame = &frame; - - pFrame->Push(CURRENT_THREAD); - - _ASSERTE(pVASigCookie == pFrame->GetVASigCookie()); - _ASSERTE(pMD == pFrame->GetFunction()); - - GetILStubForCalli(pVASigCookie, pMD); - - pFrame->Pop(CURRENT_THREAD); -} - -EXTERN_C void STDCALL GenericPInvokeCalliStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget) -{ - PreserveLastErrorHolder preserveLastError; - - 
STATIC_CONTRACT_THROWS; - STATIC_CONTRACT_GC_TRIGGERS; - STATIC_CONTRACT_MODE_COOPERATIVE; - STATIC_CONTRACT_ENTRY_POINT; - - MAKE_CURRENT_THREAD_AVAILABLE(); - -#ifdef _DEBUG - Thread::ObjectRefFlush(CURRENT_THREAD); -#endif - - PInvokeCalliFrame frame(pTransitionBlock, pVASigCookie, pUnmanagedTarget); - PInvokeCalliFrame * pFrame = &frame; - - pFrame->Push(CURRENT_THREAD); - - _ASSERTE(pVASigCookie == pFrame->GetVASigCookie()); - - GetILStubForCalli(pVASigCookie, NULL); - - pFrame->Pop(CURRENT_THREAD); -} - -PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) -{ - CONTRACT(PCODE) + CONTRACTL { THROWS; GC_TRIGGERS; @@ -5974,9 +5968,8 @@ PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) MODE_ANY; PRECONDITION(CheckPointer(pVASigCookie)); PRECONDITION(CheckPointer(pMD, NULL_OK)); - POSTCONDITION(RETVAL != NULL); } - CONTRACT_END; + CONTRACTL_END; PCODE pTempILStub = (PCODE)NULL; @@ -5996,10 +5989,8 @@ PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) DWORD dwStubFlags = PINVOKESTUB_FL_BESTFIT; - // The MethodDesc pointer may in fact be the unmanaged target, see PInvokeStubs.asm. 
- if (pMD == NULL || (UINT_PTR)pMD & 0x1) + if (pMD == NULL) { - pMD = NULL; dwStubFlags |= PINVOKESTUB_FL_UNMANAGED_CALLI; // need to convert the CALLI signature to stub signature with managed calling convention @@ -6049,9 +6040,9 @@ PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) unmgdCallConv = CorInfoCallConvExtension::C; } - mdMethodDef md; - CorNativeLinkFlags nlFlags; - CorNativeLinkType nlType; + mdMethodDef md = mdMethodDefNil; + CorNativeLinkFlags nlFlags = nlfNone; + CorNativeLinkType nlType = nltAnsi; if (pMD != NULL) { @@ -6065,12 +6056,6 @@ PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) nlFlags = sigInfo.GetLinkFlags(); nlType = sigInfo.GetCharSet(); } - else - { - md = mdMethodDefNil; - nlFlags = nlfNone; - nlType = nltAnsi; - } StubSigDesc sigDesc(pMD, signature, pVASigCookie->pModule, pVASigCookie->pLoaderModule); sigDesc.InitTypeContext(pVASigCookie->classInst, pVASigCookie->methodInst); @@ -6089,8 +6074,64 @@ PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD) UNINSTALL_UNWIND_AND_CONTINUE_HANDLER; UNINSTALL_MANAGED_EXCEPTION_DISPATCHER; +} - RETURN pVASigCookie->pPInvokeILStub; +EXTERN_C void STDCALL VarargPInvokeStubWorker(TransitionBlock* pTransitionBlock, VASigCookie* pVASigCookie) +{ + PreserveLastErrorHolder preserveLastError; + + STATIC_CONTRACT_THROWS; + STATIC_CONTRACT_GC_TRIGGERS; + STATIC_CONTRACT_MODE_COOPERATIVE; + STATIC_CONTRACT_ENTRY_POINT; + + MAKE_CURRENT_THREAD_AVAILABLE(); + +#ifdef _DEBUG + Thread::ObjectRefFlush(CURRENT_THREAD); +#endif + + MethodDesc* pMD = pVASigCookie->pMethodDesc; + _ASSERTE(pMD != NULL); + + PrestubMethodFrame frame(pTransitionBlock, pMD); + PrestubMethodFrame * pFrame = &frame; + + pFrame->Push(CURRENT_THREAD); + + _ASSERTE(pVASigCookie == pFrame->GetVASigCookie()); + _ASSERTE(pMD == pFrame->GetFunction()); + + GetILStubForCalli(pVASigCookie, pMD); + + pFrame->Pop(CURRENT_THREAD); +} + +EXTERN_C void STDCALL 
GenericPInvokeCalliStubWorker(TransitionBlock * pTransitionBlock, VASigCookie * pVASigCookie, PCODE pUnmanagedTarget) +{ + PreserveLastErrorHolder preserveLastError; + + STATIC_CONTRACT_THROWS; + STATIC_CONTRACT_GC_TRIGGERS; + STATIC_CONTRACT_MODE_COOPERATIVE; + STATIC_CONTRACT_ENTRY_POINT; + + MAKE_CURRENT_THREAD_AVAILABLE(); + +#ifdef _DEBUG + Thread::ObjectRefFlush(CURRENT_THREAD); +#endif + + PInvokeCalliFrame frame(pTransitionBlock, pVASigCookie, pUnmanagedTarget); + PInvokeCalliFrame * pFrame = &frame; + + pFrame->Push(CURRENT_THREAD); + + _ASSERTE(pVASigCookie == pFrame->GetVASigCookie()); + + GetILStubForCalli(pVASigCookie, NULL); + + pFrame->Pop(CURRENT_THREAD); } namespace diff --git a/src/coreclr/vm/dllimport.h b/src/coreclr/vm/dllimport.h index c9a079f727e106..ee60abdfc62b1e 100644 --- a/src/coreclr/vm/dllimport.h +++ b/src/coreclr/vm/dllimport.h @@ -168,7 +168,7 @@ enum PInvokeStubFlags PINVOKESTUB_FL_STUB_HAS_THIS = 0x00010000, PINVOKESTUB_FL_TARGET_HAS_THIS = 0x00020000, PINVOKESTUB_FL_CHECK_PENDING_EXCEPTION = 0x00040000, - // unused = 0x00080000, + PINVOKESTUB_FL_SHARED_STUB = 0x00080000, // unused = 0x00100000, // unused = 0x00200000, // unused = 0x00400000, @@ -197,7 +197,7 @@ enum ILStubTypes #ifdef FEATURE_INSTANTIATINGSTUB_AS_IL ILSTUB_UNBOXINGILSTUB = 0x80000005, ILSTUB_INSTANTIATINGSTUB = 0x80000006, -#endif +#endif // FEATURE_INSTANTIATINGSTUB_AS_IL ILSTUB_WRAPPERDELEGATE_INVOKE = 0x80000007, ILSTUB_TAILCALL_STOREARGS = 0x80000008, ILSTUB_TAILCALL_CALLTARGET = 0x80000009, @@ -207,12 +207,6 @@ enum ILStubTypes ILSTUB_ASYNC_RESUME = 0x8000000D, }; -#ifdef FEATURE_COMINTEROP -#define COM_ONLY(x) (x) -#else // FEATURE_COMINTEROP -#define COM_ONLY(x) false -#endif // FEATURE_COMINTEROP - inline bool SF_IsVarArgStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_CONVSIGASVARARG)); } inline bool SF_IsBestFit (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return 
(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_BESTFIT)); } inline bool SF_IsThrowOnUnmappableChar (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_THROWONUNMAPPABLECHAR)); } @@ -225,6 +219,7 @@ inline bool SF_IsCALLIStub (DWORD dwStubFlags) { LIMITED_METHOD_CONT inline bool SF_IsForNumParamBytes (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_FOR_NUMPARAMBYTES)); } inline bool SF_IsStructMarshalStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_STRUCT_MARSHAL)); } inline bool SF_IsCheckPendingException (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_CHECK_PENDING_EXCEPTION)); } +inline bool SF_IsSharedStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_SHARED_STUB)); } inline bool SF_IsVirtualStaticMethodDispatchStub(DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return dwStubFlags == ILSTUB_STATIC_VIRTUAL_DISPATCH_STUB; } @@ -239,42 +234,29 @@ inline bool SF_IsWrapperDelegateStub (DWORD dwStubFlags) { LIMITED_METHOD_CON #ifdef FEATURE_INSTANTIATINGSTUB_AS_IL inline bool SF_IsUnboxingILStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_UNBOXINGILSTUB); } inline bool SF_IsInstantiatingStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_INSTANTIATINGSTUB); } -#endif +#endif // FEATURE_INSTANTIATINGSTUB_AS_IL inline bool SF_IsTailCallStoreArgsStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_TAILCALL_STOREARGS); } inline bool SF_IsTailCallCallTargetStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == 
ILSTUB_TAILCALL_CALLTARGET); } inline bool SF_IsDelegateShuffleThunk (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_DELEGATE_SHUFFLE_THUNK); } inline bool SF_IsAsyncResumeStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags == ILSTUB_ASYNC_RESUME); } -inline bool SF_IsCOMStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COM)); } -inline bool SF_IsCOMLateBoundStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COMLATEBOUND)); } -inline bool SF_IsCOMEventCallStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COMEVENTCALL)); } -inline bool SF_IsFieldGetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_FIELDGETTER)); } -inline bool SF_IsFieldSetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return COM_ONLY(dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_FIELDSETTER)); } - -inline bool SF_IsSharedStub(DWORD dwStubFlags) -{ - WRAPPER_NO_CONTRACT; - - if (SF_IsTailCallStoreArgsStub(dwStubFlags) || SF_IsTailCallCallTargetStub(dwStubFlags)) - { - return false; - } - - if (SF_IsFieldGetterStub(dwStubFlags) || SF_IsFieldSetterStub(dwStubFlags)) - { - return false; - } - - if (SF_IsAsyncResumeStub(dwStubFlags)) - { - return false; - } - - return true; -} +#ifdef FEATURE_COMINTEROP +inline bool SF_IsCOMStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COM)); } +inline bool SF_IsCOMLateBoundStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COMLATEBOUND)); } +inline 
bool SF_IsCOMEventCallStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_COMEVENTCALL)); } +inline bool SF_IsFieldGetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_FIELDGETTER)); } +inline bool SF_IsFieldSetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < PINVOKESTUB_FL_INVALID && 0 != (dwStubFlags & PINVOKESTUB_FL_FIELDSETTER)); } +#else // FEATURE_COMINTEROP +inline bool SF_IsCOMStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return false; } +inline bool SF_IsCOMLateBoundStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return false; } +inline bool SF_IsCOMEventCallStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return false; } +inline bool SF_IsFieldGetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return false; } +inline bool SF_IsFieldSetterStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return false; } +#endif // FEATURE_COMINTEROP inline bool SF_IsForwardStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return !SF_IsReverseStub(dwStubFlags); } +// Forward PInvoke stub check will include CALLI. 
inline bool SF_IsForwardPInvokeStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (!SF_IsCOMStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags)); } inline bool SF_IsReversePInvokeStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (!SF_IsCOMStub(dwStubFlags) && SF_IsReverseStub(dwStubFlags)); } @@ -284,8 +266,6 @@ inline bool SF_IsReverseCOMStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRA inline bool SF_IsForwardDelegateStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsDelegateStub(dwStubFlags) && SF_IsForwardStub(dwStubFlags)); } inline bool SF_IsReverseDelegateStub (DWORD dwStubFlags) { WRAPPER_NO_CONTRACT; return (SF_IsDelegateStub(dwStubFlags) && SF_IsReverseStub(dwStubFlags)); } -#undef COM_ONLY - inline void SF_ConsistencyCheck(DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; @@ -588,8 +568,6 @@ class PInvokeStubLinker : public ILStubLinker BOOL HeuristicDoesThisLookLikeAGetLastErrorCall(LPBYTE pTarget); DWORD STDMETHODCALLTYPE FalseGetLastError(); -PCODE GetILStubForCalli(VASigCookie *pVASigCookie, MethodDesc *pMD); - PCODE JitILStub(MethodDesc* pStubMD); PCODE GetStubForInteropMethod(MethodDesc* pMD, DWORD dwStubFlags = 0); diff --git a/src/coreclr/vm/dynamicmethod.cpp b/src/coreclr/vm/dynamicmethod.cpp index 0af6c360bf7973..f8641ebfcd3960 100644 --- a/src/coreclr/vm/dynamicmethod.cpp +++ b/src/coreclr/vm/dynamicmethod.cpp @@ -322,7 +322,7 @@ HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EECodeGenMana GC_NOTRIGGER; MODE_ANY; INJECT_FAULT(COMPlusThrowOM()); - POSTCONDITION((RETVAL != NULL) || !pInfo->getThrowOnOutOfMemoryWithinRange()); + POSTCONDITION((RETVAL != NULL) || !pInfo->GetThrowOnOutOfMemoryWithinRange()); } CONTRACT_END; @@ -331,7 +331,7 @@ HeapList* HostCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, EECodeGenMana HeapList *pHp = pCodeHeap->InitializeHeapList(pInfo); if (pHp == NULL) { - _ASSERTE(!pInfo->getThrowOnOutOfMemoryWithinRange()); + _ASSERTE(!pInfo->GetThrowOnOutOfMemoryWithinRange()); RETURN NULL; } 
@@ -390,7 +390,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) } CONTRACTL_END; - size_t ReserveBlockSize = pInfo->getRequestSize(); + size_t ReserveBlockSize = pInfo->GetRequestSize(); // Add TrackAllocation, HeapList and very conservative padding to make sure we have enough for the allocation ReserveBlockSize += sizeof(TrackAllocation) + HOST_CODEHEAP_SIZE_ALIGN + 0x100; @@ -402,12 +402,12 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) // reserve ReserveBlockSize rounded-up to VIRTUAL_ALLOC_RESERVE_GRANULARITY of memory ReserveBlockSize = ALIGN_UP(ReserveBlockSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); - if (pInfo->m_loAddr != NULL || pInfo->m_hiAddr != NULL) + if (pInfo->GetLoAddr() != NULL || pInfo->GetHiAddr() != NULL) { - m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(ReserveBlockSize, pInfo->m_loAddr, pInfo->m_hiAddr); + m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->ReserveWithinRange(ReserveBlockSize, pInfo->GetLoAddr(), pInfo->GetHiAddr()); if (!m_pBaseAddr) { - if (pInfo->getThrowOnOutOfMemoryWithinRange()) + if (pInfo->GetThrowOnOutOfMemoryWithinRange()) ThrowOutOfMemoryWithinRange(); return NULL; } @@ -415,7 +415,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) else { // top up the ReserveBlockSize to suggested minimum - ReserveBlockSize = max(ReserveBlockSize, pInfo->getReserveSize()); + ReserveBlockSize = max(ReserveBlockSize, pInfo->GetReserveSize()); m_pBaseAddr = (BYTE*)ExecutableAllocator::Instance()->Reserve(ReserveBlockSize); if (!m_pBaseAddr) @@ -425,7 +425,7 @@ HeapList* HostCodeHeap::InitializeHeapList(CodeHeapRequestInfo *pInfo) m_pLastAvailableCommittedAddr = m_pBaseAddr; m_TotalBytesAvailable = ReserveBlockSize; m_ApproximateLargestBlock = ReserveBlockSize; - m_pAllocator = pInfo->m_pAllocator; + m_pAllocator = pInfo->GetAllocator(); HeapList* pHp = new HeapList; diff --git a/src/coreclr/vm/frames.h b/src/coreclr/vm/frames.h 
index 31b5e321584f46..25de8c5d98faa3 100644 --- a/src/coreclr/vm/frames.h +++ b/src/coreclr/vm/frames.h @@ -2203,10 +2203,10 @@ class InlinedCallFrame : public Frame void UpdateRegDisplay_Impl(const PREGDISPLAY, bool updateFloats = false); - // m_Datum contains MethodDesc ptr or + // m_Datum contains PInvokeMethodDesc ptr or // - on 64 bit host: CALLI target address (if lowest bit is set) // - on windows x86 host: argument stack size (if value is <64k) - // When m_Datum contains MethodDesc ptr, then on other than windows x86 host + // When m_Datum contains PInvokeMethodDesc ptr, then on other than windows x86 host // - bit 1 set indicates invoking new exception handling helpers // - bit 2 indicates CallCatchFunclet or CallFinallyFunclet // See code:HasFunction. diff --git a/src/coreclr/vm/i386/asmhelpers.S b/src/coreclr/vm/i386/asmhelpers.S index 8d578f3243c5a8..a7dd7f2bf0adfc 100644 --- a/src/coreclr/vm/i386/asmhelpers.S +++ b/src/coreclr/vm/i386/asmhelpers.S @@ -384,7 +384,6 @@ LOCAL_LABEL(GoCallVarargWorker): // save pMD push eax - push eax // pMD push dword ptr [esi + 4*7] // pVaSigCookie push esi // pTransitionBlock diff --git a/src/coreclr/vm/i386/asmhelpers.asm b/src/coreclr/vm/i386/asmhelpers.asm index 798fefe9f564a4..fbbd8de5d12fbd 100644 --- a/src/coreclr/vm/i386/asmhelpers.asm +++ b/src/coreclr/vm/i386/asmhelpers.asm @@ -46,7 +46,7 @@ endif ; FEATURE_EH_FUNCLETS EXTERN __alloca_probe:PROC EXTERN _PInvokeImportWorker@4:PROC -EXTERN _VarargPInvokeStubWorker@12:PROC +EXTERN _VarargPInvokeStubWorker@8:PROC EXTERN _GenericPInvokeCalliStubWorker@12:PROC EXTERN _PreStubWorker@8:PROC @@ -930,11 +930,10 @@ GoCallVarargWorker: ; save pMD push eax - push eax ; pMD push dword ptr [esi + 4*7] ; pVaSigCookie push esi ; pTransitionBlock - call _VarargPInvokeStubWorker@12 + call _VarargPInvokeStubWorker@8 ; restore pMD pop eax diff --git a/src/coreclr/vm/jitinterface.cpp b/src/coreclr/vm/jitinterface.cpp index f7a238c5ce7373..da018369afecf0 100644 --- 
a/src/coreclr/vm/jitinterface.cpp +++ b/src/coreclr/vm/jitinterface.cpp @@ -6166,7 +6166,13 @@ CORINFO_VARARGS_HANDLE CEEInfo::getVarArgsHandle(CORINFO_SIG_INFO *sig, Instantiation methodInst = Instantiation((TypeHandle*) sig->sigInst.methInst, sig->sigInst.methInstCount); SigTypeContext typeContext = SigTypeContext(classInst, methodInst); - result = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig->pSig, sig->cbSig), &typeContext)); + MethodDesc* pMD = GetMethod(methHnd); + if (pMD != NULL && !pMD->IsPInvoke()) + { + pMD = NULL; + } + + result = CORINFO_VARARGS_HANDLE(module->GetVASigCookie(Signature(sig->pSig, sig->cbSig), pMD, &typeContext)); EE_TO_JIT_TRANSITION(); diff --git a/src/coreclr/vm/method.cpp b/src/coreclr/vm/method.cpp index 349a05ec3c5396..2511b537097e0c 100644 --- a/src/coreclr/vm/method.cpp +++ b/src/coreclr/vm/method.cpp @@ -2462,20 +2462,19 @@ MethodImpl *MethodDesc::GetMethodImpl() #ifndef DACCESS_COMPILE //******************************************************************************* -BOOL MethodDesc::RequiresMethodDescCallingConvention(BOOL fEstimateForChunk /*=FALSE*/) +BOOL MethodDesc::RequiresMDContextArg() const { LIMITED_METHOD_CONTRACT; // Interop marshaling is implemented using shared stubs - if (IsPInvoke() || IsCLRToCOMCall()) + if (IsCLRToCOMCall()) return TRUE; - return FALSE; } //******************************************************************************* -BOOL MethodDesc::RequiresStableEntryPoint(BOOL fEstimateForChunk /*=FALSE*/) +BOOL MethodDesc::RequiresStableEntryPoint() { BYTE bFlags4 = VolatileLoadWithoutBarrier(&m_bFlags4); if (bFlags4 & enum_flag4_ComputedRequiresStableEntryPoint) @@ -2484,16 +2483,14 @@ BOOL MethodDesc::RequiresStableEntryPoint(BOOL fEstimateForChunk /*=FALSE*/) } else { - if (fEstimateForChunk) - return RequiresStableEntryPointCore(fEstimateForChunk); - BOOL fRequiresStableEntryPoint = RequiresStableEntryPointCore(FALSE); + BOOL fRequiresStableEntryPoint = RequiresStableEntryPointCore(); 
BYTE requiresStableEntrypointFlags = (BYTE)(enum_flag4_ComputedRequiresStableEntryPoint | (fRequiresStableEntryPoint ? enum_flag4_RequiresStableEntryPoint : 0)); InterlockedUpdateFlags4(requiresStableEntrypointFlags, TRUE); return fRequiresStableEntryPoint; } } -BOOL MethodDesc::RequiresStableEntryPointCore(BOOL fEstimateForChunk) +BOOL MethodDesc::RequiresStableEntryPointCore() { LIMITED_METHOD_CONTRACT; @@ -2509,26 +2506,17 @@ BOOL MethodDesc::RequiresStableEntryPointCore(BOOL fEstimateForChunk) if (IsLCGMethod()) return TRUE; - if (fEstimateForChunk) - { - // Make a best guess based on the method table of the chunk. - if (IsInterface()) - return TRUE; - } - else - { - // Wrapper stubs are stored in generic dictionary that's not backpatched - if (IsWrapperStub()) - return TRUE; + // Wrapper stubs are stored in generic dictionary that's not backpatched + if (IsWrapperStub()) + return TRUE; - // TODO: Can we avoid early allocation of precodes for interfaces and cominterop? - if ((IsInterface() && !IsStatic() && IsVirtual()) || IsCLRToCOMCall()) - return TRUE; + // TODO: Can we avoid early allocation of precodes for interfaces and cominterop? 
+ if ((IsInterface() && !IsStatic() && IsVirtual()) || IsCLRToCOMCall()) + return TRUE; - // FCalls need stable entrypoint that can be mapped back to MethodDesc - if (IsFCall()) - return TRUE; - } + // FCalls need stable entrypoint that can be mapped back to MethodDesc + if (IsFCall()) + return TRUE; return FALSE; } @@ -3903,7 +3891,7 @@ PrecodeType MethodDesc::GetPrecodeType() PrecodeType precodeType = PRECODE_INVALID; #ifdef HAS_FIXUP_PRECODE - if (!RequiresMethodDescCallingConvention()) + if (!RequiresMDContextArg()) { // Use the more efficient fixup precode if possible precodeType = PRECODE_FIXUP; diff --git a/src/coreclr/vm/method.hpp b/src/coreclr/vm/method.hpp index 1438276e35b42c..a46c5a09e444ad 100644 --- a/src/coreclr/vm/method.hpp +++ b/src/coreclr/vm/method.hpp @@ -742,14 +742,14 @@ class MethodDesc BOOL ShouldSuppressGCTransition(); #ifdef FEATURE_COMINTEROP - inline DWORD IsCLRToCOMCall() + inline DWORD IsCLRToCOMCall() const { WRAPPER_NO_CONTRACT; return mcComInterop == GetClassification(); } #else // !FEATURE_COMINTEROP // hardcoded to return FALSE to improve code readability - inline DWORD IsCLRToCOMCall() + inline DWORD IsCLRToCOMCall() const { LIMITED_METHOD_CONTRACT; return FALSE; @@ -1718,14 +1718,14 @@ class MethodDesc // Running the Prestub preparation step. // The stub produced by prestub requires method desc to be passed - // in dedicated register. Used to implement stubs shared between - // MethodDescs (e.g. PInvoke stubs) - BOOL RequiresMethodDescCallingConvention(BOOL fEstimateForChunk = FALSE); + // in dedicated register. + // See HasMDContextArg() for the related stub version. + BOOL RequiresMDContextArg() const; // Returns true if the method has to have stable entrypoint always. 
- BOOL RequiresStableEntryPoint(BOOL fEstimateForChunk = FALSE); + BOOL RequiresStableEntryPoint(); private: - BOOL RequiresStableEntryPointCore(BOOL fEstimateForChunk); + BOOL RequiresStableEntryPointCore(); public: // @@ -2868,7 +2868,7 @@ class DynamicMethodDesc : public StoredSigMethodDesc LIMITED_METHOD_CONTRACT; _ASSERTE(IsILStub()); return HasFlags(FlagStatic) - && !HasFlags(FlagIsCALLI) + && !HasFlags(FlagIsCALLI | FlagIsDelegate) && GetILStubType() == StubCLRToNativeInterop; } @@ -2906,10 +2906,11 @@ class DynamicMethodDesc : public StoredSigMethodDesc } // Whether the stub takes a context argument that is an interop MethodDesc. + // See RequiresMDContextArg() for the non-stub version. bool HasMDContextArg() const { LIMITED_METHOD_CONTRACT; - return IsCLRToCOMStub() || (IsPInvokeStub() && !HasFlags(FlagIsDelegate)); + return IsCLRToCOMStub(); } // diff --git a/src/coreclr/vm/prestub.cpp b/src/coreclr/vm/prestub.cpp index 9336a11d44f317..d1f5d7d9415b2e 100644 --- a/src/coreclr/vm/prestub.cpp +++ b/src/coreclr/vm/prestub.cpp @@ -375,31 +375,6 @@ PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig) if (pConfig->MayUsePrecompiledCode()) { -#ifdef FEATURE_READYTORUN - if (IsDynamicMethod() && GetLoaderModule()->IsSystem() && MayUsePrecompiledILStub()) - { - // Images produced using crossgen2 have non-shareable pinvoke stubs which can't be used with the IL - // stubs that the runtime generates (they take no secret parameter, and each pinvoke has a separate code) - if (GetModule()->IsReadyToRun() && !GetModule()->GetReadyToRunInfo()->HasNonShareablePInvokeStubs()) - { - DynamicMethodDesc* stubMethodDesc = this->AsDynamicMethodDesc(); - if (stubMethodDesc->IsILStub() && stubMethodDesc->IsPInvokeStub()) - { - MethodDesc* pTargetMD = stubMethodDesc->GetILStubResolver()->GetStubTargetMethodDesc(); - if (pTargetMD != NULL) - { - pCode = pTargetMD->GetPrecompiledR2RCode(pConfig); - if (pCode != (PCODE)NULL) - { - LOG_USING_R2R_CODE(this); - 
pConfig->SetNativeCode(pCode, &pCode); - } - } - } - } - } -#endif // FEATURE_READYTORUN - if (pCode == (PCODE)NULL) { pCode = GetPrecompiledCode(pConfig, shouldTier); @@ -2279,8 +2254,9 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT, CallerGCMode callerGCMo } // end else if (IsIL() || IsNoMetadata()) else if (IsPInvoke()) { - if (GetModule()->IsReadyToRun() && GetModule()->GetReadyToRunInfo()->HasNonShareablePInvokeStubs() && MayUsePrecompiledILStub()) + if (GetModule()->IsReadyToRun() && MayUsePrecompiledILStub()) { + _ASSERTE(GetModule()->GetReadyToRunInfo()->HasNonShareablePInvokeStubs()); // In crossgen2, we compile non-shareable IL stubs for pinvokes. If we can find code for such // a stub, we'll use it directly instead and avoid emitting an IL stub. PrepareCodeConfig config(NativeCodeVersion(this), TRUE, TRUE); @@ -2944,7 +2920,7 @@ static PCODE getHelperForSharedStatic(Module * pModule, ReadyToRunFixupKind kind } pArgs->offset = pFD->GetOffset(); - BinderMethodID managedHelperId = fUnbox ? + BinderMethodID managedHelperId = fUnbox ? METHOD__STATICSHELPERS__STATICFIELDADDRESSUNBOX_DYNAMIC : METHOD__STATICSHELPERS__STATICFIELDADDRESS_DYNAMIC; diff --git a/src/coreclr/vm/stackwalk.cpp b/src/coreclr/vm/stackwalk.cpp index 3376d945e0ca91..d7df3170dc8583 100644 --- a/src/coreclr/vm/stackwalk.cpp +++ b/src/coreclr/vm/stackwalk.cpp @@ -2988,17 +2988,6 @@ BOOL StackFrameIterator::CheckForSkippedFrames(void) (dac_cast(m_crawl.pFrame) < pvReferenceSP) ) { - BOOL fReportInteropMD = - // If we see InlinedCallFrame in certain IL stubs, we should report the MD that - // was passed to the stub as its secret argument. This is the true interop MD. - // Note that code:InlinedCallFrame.GetFunction may return NULL in this case because - // the call is made using the CALLI instruction. 
- m_crawl.pFrame != FRAME_TOP && - m_crawl.pFrame->GetFrameIdentifier() == FrameIdentifier::InlinedCallFrame && - m_crawl.pFunc != NULL && - m_crawl.pFunc->IsILStub() && - m_crawl.pFunc->AsDynamicMethodDesc()->HasMDContextArg(); - if (fHandleSkippedFrames) { m_crawl.GotoNextFrame(); @@ -3019,6 +3008,16 @@ BOOL StackFrameIterator::CheckForSkippedFrames(void) { m_crawl.isFrameless = false; + // If we see InlinedCallFrame in certain IL stubs, we should report the MD that + // was passed to the stub as its secret argument. This is the true interop MD. + // Note that code:InlinedCallFrame.GetFunction may return NULL in this case because + // the call is made using the CALLI instruction. + bool fReportInteropMD = + m_crawl.pFrame != FRAME_TOP + && m_crawl.pFrame->GetFrameIdentifier() == FrameIdentifier::InlinedCallFrame + && m_crawl.pFunc != NULL + && m_crawl.pFunc->IsILStub() + && m_crawl.pFunc->AsDynamicMethodDesc()->HasMDContextArg(); if (fReportInteropMD) { m_crawl.pFunc = ((PTR_InlinedCallFrame)m_crawl.pFrame)->GetActualInteropMethodDesc(); diff --git a/src/coreclr/vm/stubmgr.cpp b/src/coreclr/vm/stubmgr.cpp index 19159828008faa..c612b6f8abddd6 100644 --- a/src/coreclr/vm/stubmgr.cpp +++ b/src/coreclr/vm/stubmgr.cpp @@ -402,7 +402,7 @@ BOOL StubManager::IsSingleOwner(PCODE stubAddress, StubManager * pOwner) if (it.Current()->CheckIsStub_Worker(stubAddress)) { // If you hit this assert, you can tell what 2 stub managers are conflicting by inspecting their vtable. 
- CONSISTENCY_CHECK_MSGF((it.Current() == pOwner), ("Stub at 0x%p is owner by multiple managers (0x%p, 0x%p)", + CONSISTENCY_CHECK_MSGF((it.Current() == pOwner), ("Stub at %p is owned by multiple managers (%p, %p)", (void*) stubAddress, pOwner, it.Current())); count++; } @@ -647,7 +647,7 @@ void StubManager::AddStubManager(StubManager *mgr) g_pFirstManager = mgr; } - LOG((LF_CORDB, LL_EVERYTHING, "StubManager::AddStubManager - 0x%p (vptr %p)\n", mgr, (*(PVOID*)mgr))); + LOG((LF_CORDB, LL_EVERYTHING, "StubManager::AddStubManager - %p (vptr %p)\n", mgr, (*(PVOID*)mgr))); } //----------------------------------------------------------- @@ -793,7 +793,7 @@ void StubManager::DbgBeginLog(TADDR addrCallInstruction, TADDR addrCallTarget) EX_END_CATCH } - DbgWriteLog("Beginning Step-in. IP after Call instruction is at 0x%p, call target is at 0x%p\n", + DbgWriteLog("Beginning Step-in. IP after Call instruction is at %p, call target is at %p\n", addrCallInstruction, addrCallTarget); #endif } @@ -1722,10 +1722,10 @@ BOOL ILStubManager::TraceManager(Thread *thread, PCODE stubIP = GetIP(pContext); *pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext); - DynamicMethodDesc *pStubMD = NonVirtualEntry2MethodDesc(stubIP)->AsDynamicMethodDesc(); + DynamicMethodDesc* pStubMD = NonVirtualEntry2MethodDesc(stubIP)->AsDynamicMethodDesc(); TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); - Object * pThis = StubManagerHelpers::GetThisPtr(pContext); - LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Enter: StubMD 0x%p, HiddenArg 0x%p, ThisPtr 0x%p\n", + Object* pThis = StubManagerHelpers::GetThisPtr(pContext); + LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Enter: StubMD %p, HiddenArg %p, ThisPtr %p\n", pStubMD, arg, pThis)); // See code:ILStubCache.CreateNewMethodDesc for the code that sets flags on stub MDs @@ -1742,13 +1742,13 @@ BOOL ILStubManager::TraceManager(Thread *thread, // This is reverse P/Invoke stub, the argument is UMEntryThunkData UMEntryThunkData
*pEntryThunk = (UMEntryThunkData*)arg; target = pEntryThunk->GetManagedTarget(); - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Reverse P/Invoke case 0x%p\n", target)); + LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Reverse P/Invoke case %p\n", target)); } else { // This is COM-to-CLR stub, the argument is the target target = (PCODE)arg; - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: COM-to-CLR case 0x%p\n", target)); + LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: COM-to-CLR case %p\n", target)); } trace->InitForManaged(target); } @@ -1758,7 +1758,7 @@ BOOL ILStubManager::TraceManager(Thread *thread, DelegateObject *pDel = (DelegateObject *)pThis; target = pDel->GetMethodPtrAux(); - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward delegate P/Invoke case 0x%p\n", target)); + LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward delegate P/Invoke case %p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->HasFlags(DynamicMethodDesc::FlagIsCALLI)) @@ -1771,7 +1771,7 @@ BOOL ILStubManager::TraceManager(Thread *thread, target = target >> 1; // call target is encoded as (addr << 1) | 1 #endif // TARGET_AMD64 - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case 0x%p\n", target)); + LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Unmanaged CALLI case %p\n", target)); trace->InitForUnmanaged(target); } else if (pStubMD->IsStepThroughStub()) @@ -1783,7 +1783,7 @@ BOOL ILStubManager::TraceManager(Thread *thread, return FALSE; } - LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Step through to target - 0x%p\n", pTargetMD)); + LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Step through to target - %p\n", pTargetMD)); target = GetStubTarget(pTargetMD); if (target == (PCODE)NULL) return FALSE; @@ -1794,30 +1794,19 @@ BOOL ILStubManager::TraceManager(Thread *thread, { LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Hidden argument is MethodDesc\n")); - // This is either direct forward P/Invoke or a 
CLR-to-COM call, the argument is MD MethodDesc *pMD = (MethodDesc *)arg; - if (pMD->IsPInvoke()) - { - PInvokeMethodDesc* pNMD = reinterpret_cast(pMD); - _ASSERTE_IMPL(!pNMD->PInvokeTargetIsImportThunk()); - target = (PCODE)pNMD->GetPInvokeTarget(); - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: Forward P/Invoke case 0x%p\n", target)); - trace->InitForUnmanaged(target); - } + #ifdef FEATURE_COMINTEROP - else - { - LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub is CLR-to-COM\n")); - _ASSERTE(pMD->IsCLRToCOMCall()); - CLRToCOMCallMethodDesc *pCMD = (CLRToCOMCallMethodDesc *)pMD; - _ASSERTE(!pCMD->IsStatic() && !pCMD->IsCtor() && "Static methods and constructors are not supported for built-in classic COM"); + LOG((LF_CORDB, LL_INFO1000, "ILSM::TraceManager: Stub is CLR-to-COM\n")); + _ASSERTE(pMD->IsCLRToCOMCall()); + CLRToCOMCallMethodDesc *pCMD = (CLRToCOMCallMethodDesc *)pMD; + _ASSERTE(!pCMD->IsStatic() && !pCMD->IsCtor() && "Static methods and constructors are not supported for built-in classic COM"); - if (pThis != NULL) - { - target = GetCOMTarget(pThis, pCMD->m_pCLRToCOMCallInfo); - LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM case 0x%p\n", target)); - trace->InitForUnmanaged(target); - } + if (pThis != NULL) + { + target = GetCOMTarget(pThis, pCMD->m_pCLRToCOMCallInfo); + LOG((LF_CORDB, LL_INFO10000, "ILSM::TraceManager: CLR-to-COM case %p\n", target)); + trace->InitForUnmanaged(target); } #endif // FEATURE_COMINTEROP } @@ -1953,15 +1942,20 @@ BOOL InteropDispatchStubManager::TraceManager(Thread *thread, TADDR arg = StubManagerHelpers::GetHiddenArg(pContext); // IL stub may not exist at this point so we init directly for the target (TODO?) 
- - if (IsVarargPInvokeStub(GetIP(pContext))) + PCODE stubIP = GetIP(pContext); + if (IsVarargPInvokeStub(stubIP)) { #if defined(TARGET_ARM64) && defined(__APPLE__) //On ARM64 Mac, we cannot put a breakpoint inside of VarargPInvokeStub LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: Skipping on arm64-macOS\n")); return FALSE; #else - PInvokeMethodDesc *pNMD = (PInvokeMethodDesc *)arg; + TADDR firstArg = StubManagerHelpers::GetFirstArg(pContext); + _ASSERTE(firstArg != (TADDR)NULL); + + VASigCookie* vaSigCookie = (VASigCookie*)firstArg; + PInvokeMethodDesc* pNMD = reinterpret_cast(vaSigCookie->pMethodDesc); + _ASSERTE(pNMD != NULL); _ASSERTE(pNMD->IsPInvoke()); PCODE target = (PCODE)pNMD->GetPInvokeTarget(); @@ -1969,7 +1963,7 @@ BOOL InteropDispatchStubManager::TraceManager(Thread *thread, trace->InitForUnmanaged(target); #endif //defined(TARGET_ARM64) && defined(__APPLE__) } - else if (GetIP(pContext) == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) + else if (stubIP == GetEEFuncEntryPoint(GenericPInvokeCalliHelper)) { #if defined(TARGET_ARM64) && defined(__APPLE__) //On ARM64 Mac, we cannot put a breakpoint inside of GenericPInvokeCalliHelper @@ -2008,7 +2002,7 @@ BOOL InteropDispatchStubManager::TraceManager(Thread *thread, LPVOID *lpVtbl = *(LPVOID **)(IUnknown *)pUnk; PCODE target = (PCODE)lpVtbl[6]; // DISPATCH_INVOKE_SLOT; - LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: CLR-to-COM late-bound case 0x%p\n", target)); + LOG((LF_CORDB, LL_INFO10000, "IDSM::TraceManager: CLR-to-COM late-bound case %p\n", target)); trace->InitForUnmanaged(target); GCPROTECT_END(); diff --git a/src/coreclr/vm/stubmgr.h b/src/coreclr/vm/stubmgr.h index 4a9b449ff2e97c..d1a71ca46f83db 100644 --- a/src/coreclr/vm/stubmgr.h +++ b/src/coreclr/vm/stubmgr.h @@ -762,26 +762,31 @@ class StubManagerHelpers #endif } - static PTR_Object GetThisPtr(T_CONTEXT * pContext) + static TADDR GetFirstArg(T_CONTEXT * pContext) { #if defined(TARGET_X86) - return dac_cast(pContext->Ecx); + return 
(TADDR)pContext->Ecx; #elif defined(TARGET_AMD64) #ifdef UNIX_AMD64_ABI - return dac_cast(pContext->Rdi); + return (TADDR)pContext->Rdi; #else - return dac_cast(pContext->Rcx); + return (TADDR)pContext->Rcx; #endif #elif defined(TARGET_ARM) - return dac_cast((TADDR)pContext->R0); + return (TADDR)pContext->R0; #elif defined(TARGET_ARM64) - return dac_cast(pContext->X0); + return (TADDR)pContext->X0; #else - PORTABILITY_ASSERT("StubManagerHelpers::GetThisPtr"); - return NULL; + PORTABILITY_ASSERT("StubManagerHelpers::GetFirstArg"); + return (TADDR)0; #endif } + static PTR_Object GetThisPtr(T_CONTEXT * pContext) + { + return dac_cast(GetFirstArg(pContext)); + } + static PCODE GetTailCallTarget(T_CONTEXT * pContext) { #if defined(TARGET_X86)